Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/pandas/_config/__init__.py +57 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_config/localization.py +172 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__init__.py +197 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/_constants.py +30 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/_optional.py +168 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/compressors.py +77 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py +53 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/function.py +418 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/pickle_compat.py +262 -0
- env-llmeval/lib/python3.10/site-packages/pandas/compat/pyarrow.py +27 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_arithmetic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_constructors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_cumulative.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_formats.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_iteration.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_logical_ops.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_missing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_npfuncs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_reductions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_subclass.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_ufunc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_unary.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_validate.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py +258 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py +843 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py +129 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py +9 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py +25 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py +196 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_datetime.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/_config/__init__.py
ADDED
@@ -0,0 +1,57 @@
"""
pandas._config is considered explicitly upstream of everything else in pandas,
should have no intra-pandas dependencies.

importing `dates` and `display` ensures that keys needed by _libs
are initialized.
"""
__all__ = [
    "config",
    "detect_console_encoding",
    "get_option",
    "set_option",
    "reset_option",
    "describe_option",
    "option_context",
    "options",
    "using_copy_on_write",
    "warn_copy_on_write",
]
from pandas._config import config
from pandas._config import dates  # pyright: ignore[reportUnusedImport]  # noqa: F401
from pandas._config.config import (
    _global_config,
    describe_option,
    get_option,
    option_context,
    options,
    reset_option,
    set_option,
)
from pandas._config.display import detect_console_encoding


def using_copy_on_write() -> bool:
    _mode_options = _global_config["mode"]
    return (
        _mode_options["copy_on_write"] is True
        and _mode_options["data_manager"] == "block"
    )


def warn_copy_on_write() -> bool:
    _mode_options = _global_config["mode"]
    return (
        _mode_options["copy_on_write"] == "warn"
        and _mode_options["data_manager"] == "block"
    )


def using_nullable_dtypes() -> bool:
    _mode_options = _global_config["mode"]
    return _mode_options["nullable_dtypes"]


def using_pyarrow_string_dtype() -> bool:
    _mode_options = _global_config["future"]
    return _mode_options["infer_string"]
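The helpers above only read options that pandas registers elsewhere; they are toggled through the public option API. An illustrative sketch, not part of the uploaded file, assuming a pandas 2.x install where "mode.copy_on_write" and "mode.data_manager" are registered options:

import pandas as pd
from pandas._config import using_copy_on_write

# Flipping the public option is what makes using_copy_on_write() return True,
# since the default data_manager is "block".
pd.set_option("mode.copy_on_write", True)
print(using_copy_on_write())  # expected: True

# option_context restores the previous value on exit.
with pd.option_context("mode.copy_on_write", False):
    print(using_copy_on_write())  # expected: False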
env-llmeval/lib/python3.10/site-packages/pandas/_config/localization.py
ADDED
@@ -0,0 +1,172 @@
"""
Helpers for configuring locale settings.

Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
from __future__ import annotations

from contextlib import contextmanager
import locale
import platform
import re
import subprocess
from typing import TYPE_CHECKING

from pandas._config.config import options

if TYPE_CHECKING:
    from collections.abc import Generator


@contextmanager
def set_locale(
    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
) -> Generator[str | tuple[str, str], None, None]:
    """
    Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form <language_country>.<encoding>. For example to set
        the current locale to US English with a UTF8 encoding, you would pass
        "en_US.UTF-8".
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably isn't
    thread-safe.
    """
    # getlocale is not always compliant with setlocale, use setlocale. GH#46595
    current_locale = locale.setlocale(lc_var)

    try:
        locale.setlocale(lc_var, new_locale)
        normalized_code, normalized_encoding = locale.getlocale()
        if normalized_code is not None and normalized_encoding is not None:
            yield f"{normalized_code}.{normalized_encoding}"
        else:
            yield new_locale
    finally:
        locale.setlocale(lc_var, current_locale)


def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
    """
    Check to see if we can set a locale, and subsequently get the locale,
    without raising an Exception.

    Parameters
    ----------
    lc : str
        The locale to attempt to set.
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Returns
    -------
    bool
        Whether the passed locale can be set
    """
    try:
        with set_locale(lc, lc_var=lc_var):
            pass
    except (ValueError, locale.Error):
        # horrible name for an Exception subclass
        return False
    else:
        return True


def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : str
        A string where each locale is separated by a newline.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    return [
        loc
        for loc in (
            locale.normalize(loc.strip()) if normalize else loc.strip()
            for loc in locales
        )
        if can_set_locale(loc)
    ]


def get_locales(
    prefix: str | None = None,
    normalize: bool = True,
) -> list[str]:
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

        On error will return an empty list (no locale available, e.g. Windows)

    """
    if platform.system() in ("Linux", "Darwin"):
        raw_locales = subprocess.check_output(["locale", "-a"])
    else:
        # Other platforms e.g. windows platforms don't define "locale -a"
        #  Note: is_platform_windows causes circular import here
        return []

    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        split_raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in split_raw_locales:
            try:
                out_locales.append(str(x, encoding=options.display.encoding))
            except UnicodeError:
                # 'locale -a' is used to populate 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding.  Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))

    except TypeError:
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
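For orientation, a small sketch of how the locale helpers above are typically exercised; it is not part of the uploaded file, and the locale name "en_US.UTF-8" is only an example that may not exist on a given machine, which is exactly the case can_set_locale guards against:

from pandas._config.localization import can_set_locale, get_locales, set_locale

available = get_locales(prefix="en")  # [] on platforms without `locale -a`
if can_set_locale("en_US.UTF-8"):
    with set_locale("en_US.UTF-8"):
        # this block runs with LC_ALL temporarily switched;
        # the previous locale is restored on exit.
        pass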
env-llmeval/lib/python3.10/site-packages/pandas/compat/__init__.py
ADDED
@@ -0,0 +1,197 @@
"""
compat
======

Cross-compatible functions for different versions of Python.

Other items:
* platform checker
"""
from __future__ import annotations

import os
import platform
import sys
from typing import TYPE_CHECKING

from pandas.compat._constants import (
    IS64,
    ISMUSL,
    PY310,
    PY311,
    PY312,
    PYPY,
)
import pandas.compat.compressors
from pandas.compat.numpy import is_numpy_dev
from pandas.compat.pyarrow import (
    pa_version_under10p1,
    pa_version_under11p0,
    pa_version_under13p0,
    pa_version_under14p0,
    pa_version_under14p1,
    pa_version_under16p0,
)

if TYPE_CHECKING:
    from pandas._typing import F


def set_function_name(f: F, name: str, cls: type) -> F:
    """
    Bind the name/qualname attributes of the function.
    """
    f.__name__ = name
    f.__qualname__ = f"{cls.__name__}.{name}"
    f.__module__ = cls.__module__
    return f


def is_platform_little_endian() -> bool:
    """
    Checking if the running platform is little endian.

    Returns
    -------
    bool
        True if the running platform is little endian.
    """
    return sys.byteorder == "little"


def is_platform_windows() -> bool:
    """
    Checking if the running platform is windows.

    Returns
    -------
    bool
        True if the running platform is windows.
    """
    return sys.platform in ["win32", "cygwin"]


def is_platform_linux() -> bool:
    """
    Checking if the running platform is linux.

    Returns
    -------
    bool
        True if the running platform is linux.
    """
    return sys.platform == "linux"


def is_platform_mac() -> bool:
    """
    Checking if the running platform is mac.

    Returns
    -------
    bool
        True if the running platform is mac.
    """
    return sys.platform == "darwin"


def is_platform_arm() -> bool:
    """
    Checking if the running platform uses ARM architecture.

    Returns
    -------
    bool
        True if the running platform uses ARM architecture.
    """
    return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
        "armv"
    )


def is_platform_power() -> bool:
    """
    Checking if the running platform uses Power architecture.

    Returns
    -------
    bool
        True if the running platform uses Power architecture.
    """
    return platform.machine() in ("ppc64", "ppc64le")


def is_ci_environment() -> bool:
    """
    Checking if running in a continuous integration environment by checking
    the PANDAS_CI environment variable.

    Returns
    -------
    bool
        True if running in a continuous integration environment.
    """
    return os.environ.get("PANDAS_CI", "0") == "1"


def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
    """
    Importing the `LZMAFile` class from the `lzma` module.

    Returns
    -------
    class
        The `LZMAFile` class from the `lzma` module.

    Raises
    ------
    RuntimeError
        If the `lzma` module was not imported correctly, or didn't exist.
    """
    if not pandas.compat.compressors.has_lzma:
        raise RuntimeError(
            "lzma module not available. "
            "A Python re-install with the proper dependencies, "
            "might be required to solve this issue."
        )
    return pandas.compat.compressors.LZMAFile


def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
    """
    Importing the `BZ2File` class from the `bz2` module.

    Returns
    -------
    class
        The `BZ2File` class from the `bz2` module.

    Raises
    ------
    RuntimeError
        If the `bz2` module was not imported correctly, or didn't exist.
    """
    if not pandas.compat.compressors.has_bz2:
        raise RuntimeError(
            "bz2 module not available. "
            "A Python re-install with the proper dependencies, "
            "might be required to solve this issue."
        )
    return pandas.compat.compressors.BZ2File


__all__ = [
    "is_numpy_dev",
    "pa_version_under10p1",
    "pa_version_under11p0",
    "pa_version_under13p0",
    "pa_version_under14p0",
    "pa_version_under14p1",
    "pa_version_under16p0",
    "IS64",
    "ISMUSL",
    "PY310",
    "PY311",
    "PY312",
    "PYPY",
]
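A brief, hedged sketch of how the platform checkers and the guarded compressor accessors above are meant to be used; it is not part of the uploaded file, and the RuntimeError branch only triggers on Python builds compiled without the lzma module:

from pandas import compat

if compat.is_platform_windows() or compat.is_ci_environment():
    pass  # e.g. skip locale- or filesystem-dependent behaviour

try:
    LZMAFile = compat.get_lzma_file()  # raises RuntimeError if lzma is absent
except RuntimeError:
    LZMAFile = None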
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.71 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc
ADDED
Binary file (716 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc
ADDED
Binary file (4.38 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc
ADDED
Binary file (1.74 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc
ADDED
Binary file (5.67 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc
ADDED
Binary file (857 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/compat/_constants.py
ADDED
@@ -0,0 +1,30 @@
"""
_constants
======

Constants relevant for the Python implementation.
"""

from __future__ import annotations

import platform
import sys
import sysconfig

IS64 = sys.maxsize > 2**32

PY310 = sys.version_info >= (3, 10)
PY311 = sys.version_info >= (3, 11)
PY312 = sys.version_info >= (3, 12)
PYPY = platform.python_implementation() == "PyPy"
ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "")
REF_COUNT = 2 if PY311 else 3

__all__ = [
    "IS64",
    "ISMUSL",
    "PY310",
    "PY311",
    "PY312",
    "PYPY",
]
env-llmeval/lib/python3.10/site-packages/pandas/compat/_optional.py
ADDED
@@ -0,0 +1,168 @@
from __future__ import annotations

import importlib
import sys
from typing import TYPE_CHECKING
import warnings

from pandas.util._exceptions import find_stack_level

from pandas.util.version import Version

if TYPE_CHECKING:
    import types

# Update install.rst & setup.cfg when updating versions!

VERSIONS = {
    "adbc-driver-postgresql": "0.8.0",
    "adbc-driver-sqlite": "0.8.0",
    "bs4": "4.11.2",
    "blosc": "1.21.3",
    "bottleneck": "1.3.6",
    "dataframe-api-compat": "0.1.7",
    "fastparquet": "2022.12.0",
    "fsspec": "2022.11.0",
    "html5lib": "1.1",
    "hypothesis": "6.46.1",
    "gcsfs": "2022.11.0",
    "jinja2": "3.1.2",
    "lxml.etree": "4.9.2",
    "matplotlib": "3.6.3",
    "numba": "0.56.4",
    "numexpr": "2.8.4",
    "odfpy": "1.4.1",
    "openpyxl": "3.1.0",
    "pandas_gbq": "0.19.0",
    "psycopg2": "2.9.6",  # (dt dec pq3 ext lo64)
    "pymysql": "1.0.2",
    "pyarrow": "10.0.1",
    "pyreadstat": "1.2.0",
    "pytest": "7.3.2",
    "python-calamine": "0.1.7",
    "pyxlsb": "1.0.10",
    "s3fs": "2022.11.0",
    "scipy": "1.10.0",
    "sqlalchemy": "2.0.0",
    "tables": "3.8.0",
    "tabulate": "0.9.0",
    "xarray": "2022.12.0",
    "xlrd": "2.0.1",
    "xlsxwriter": "3.0.5",
    "zstandard": "0.19.0",
    "tzdata": "2022.7",
    "qtpy": "2.3.0",
    "pyqt5": "5.15.9",
}

# A mapping from import name to package name (on PyPI) for packages where
# these two names are different.

INSTALL_MAPPING = {
    "bs4": "beautifulsoup4",
    "bottleneck": "Bottleneck",
    "jinja2": "Jinja2",
    "lxml.etree": "lxml",
    "odf": "odfpy",
    "pandas_gbq": "pandas-gbq",
    "python_calamine": "python-calamine",
    "sqlalchemy": "SQLAlchemy",
    "tables": "pytables",
}


def get_version(module: types.ModuleType) -> str:
    version = getattr(module, "__version__", None)

    if version is None:
        raise ImportError(f"Can't determine version for {module.__name__}")
    if module.__name__ == "psycopg2":
        # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
        version = version.split()[0]
    return version


def import_optional_dependency(
    name: str,
    extra: str = "",
    errors: str = "raise",
    min_version: str | None = None,
):
    """
    Import an optional dependency.

    By default, if a dependency is missing an ImportError with a nice
    message will be raised. If a dependency is present, but too old,
    we raise.

    Parameters
    ----------
    name : str
        The module name.
    extra : str
        Additional text to include in the ImportError message.
    errors : str {'raise', 'warn', 'ignore'}
        What to do when a dependency is not found or its version is too old.

        * raise : Raise an ImportError
        * warn : Only applicable when a module's version is too old.
          Warns that the version is too old and returns None
        * ignore: If the module is not installed, return None, otherwise,
          return the module, even if the version is too old.
          It's expected that users validate the version locally when
          using ``errors="ignore"`` (see. ``io/html.py``)
    min_version : str, default None
        Specify a minimum version that is different from the global pandas
        minimum version required.
    Returns
    -------
    maybe_module : Optional[ModuleType]
        The imported module, when found and the version is correct.
        None is returned when the package is not found and `errors`
        is False, or when the package's version is too old and `errors`
        is ``'warn'`` or ``'ignore'``.
    """
    assert errors in {"warn", "raise", "ignore"}

    package_name = INSTALL_MAPPING.get(name)
    install_name = package_name if package_name is not None else name

    msg = (
        f"Missing optional dependency '{install_name}'. {extra} "
        f"Use pip or conda to install {install_name}."
    )
    try:
        module = importlib.import_module(name)
    except ImportError:
        if errors == "raise":
            raise ImportError(msg)
        return None

    # Handle submodules: if we have submodule, grab parent module from sys.modules
    parent = name.split(".")[0]
    if parent != name:
        install_name = parent
        module_to_get = sys.modules[install_name]
    else:
        module_to_get = module
    minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
    if minimum_version:
        version = get_version(module_to_get)
        if version and Version(version) < Version(minimum_version):
            msg = (
                f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
                f"(version '{version}' currently installed)."
            )
            if errors == "warn":
                warnings.warn(
                    msg,
                    UserWarning,
                    stacklevel=find_stack_level(),
                )
                return None
            elif errors == "raise":
                raise ImportError(msg)
            else:
                return None

    return module
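A short sketch of the three `errors` modes of import_optional_dependency as defined above; the module names are only examples and the sketch is not part of the uploaded file:

from pandas.compat._optional import import_optional_dependency

# "raise" (default): ImportError if missing or older than VERSIONS["pyarrow"]
pa = import_optional_dependency("pyarrow")

# "warn": returns None and emits a UserWarning when the version is too old
maybe_bs4 = import_optional_dependency("bs4", errors="warn")

# "ignore": returns None if missing, or the module even when it is too old
lxml_etree = import_optional_dependency("lxml.etree", errors="ignore")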
env-llmeval/lib/python3.10/site-packages/pandas/compat/compressors.py
ADDED
@@ -0,0 +1,77 @@
"""
Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5.
"""

from __future__ import annotations

from pickle import PickleBuffer

from pandas.compat._constants import PY310

try:
    import bz2

    has_bz2 = True
except ImportError:
    has_bz2 = False

try:
    import lzma

    has_lzma = True
except ImportError:
    has_lzma = False


def flatten_buffer(
    b: bytes | bytearray | memoryview | PickleBuffer,
) -> bytes | bytearray | memoryview:
    """
    Return some 1-D `uint8` typed buffer.

    Coerces anything that does not match that description to one that does
    without copying if possible (otherwise will copy).
    """

    if isinstance(b, (bytes, bytearray)):
        return b

    if not isinstance(b, PickleBuffer):
        b = PickleBuffer(b)

    try:
        # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy
        return b.raw()
    except BufferError:
        # perform in-memory copy if buffer is not contiguous
        return memoryview(b).tobytes("A")


if has_bz2:

    class BZ2File(bz2.BZ2File):
        if not PY310:

            def write(self, b) -> int:
                # Workaround issue where `bz2.BZ2File` expects `len`
                # to return the number of bytes in `b` by converting
                # `b` into something that meets that constraint with
                # minimal copying.
                #
                # Note: This is fixed in Python 3.10.
                return super().write(flatten_buffer(b))


if has_lzma:

    class LZMAFile(lzma.LZMAFile):
        if not PY310:

            def write(self, b) -> int:
                # Workaround issue where `lzma.LZMAFile` expects `len`
                # to return the number of bytes in `b` by converting
                # `b` into something that meets that constraint with
                # minimal copying.
                #
                # Note: This is fixed in Python 3.10.
                return super().write(flatten_buffer(b))
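To make the intent of the patched write() concrete, a hedged sketch of flatten_buffer on the kinds of inputs pickle protocol 5 can hand over; it is not part of the uploaded file, and the strided memoryview is the case expected to force the tobytes("A") copy:

from pandas.compat.compressors import flatten_buffer

flat_bytes = flatten_buffer(b"abc")      # bytes pass through untouched

data = bytearray(range(16))
strided = memoryview(data)[::2]          # non-contiguous, so .raw() raises BufferError
flat = flatten_buffer(strided)           # falls back to an in-memory copy
print(bytes(flat) == bytes(data[::2]))   # expected: True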
env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py
ADDED
@@ -0,0 +1,53 @@
""" support numpy compatibility across versions """
import warnings

import numpy as np

from pandas.util.version import Version

# numpy versioning
_np_version = np.__version__
_nlv = Version(_np_version)
np_version_lt1p23 = _nlv < Version("1.23")
np_version_gte1p24 = _nlv >= Version("1.24")
np_version_gte1p24p3 = _nlv >= Version("1.24.3")
np_version_gte1p25 = _nlv >= Version("1.25")
np_version_gt2 = _nlv >= Version("2.0.0.dev0")
is_numpy_dev = _nlv.dev is not None
_min_numpy_ver = "1.22.4"


if _nlv < Version(_min_numpy_ver):
    raise ImportError(
        f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n"
        f"your numpy version is {_np_version}.\n"
        f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version"
    )


np_long: type
np_ulong: type

if np_version_gt2:
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                r".*In the future `np\.long` will be defined as.*",
                FutureWarning,
            )
            np_long = np.long  # type: ignore[attr-defined]
            np_ulong = np.ulong  # type: ignore[attr-defined]
    except AttributeError:
        np_long = np.int_
        np_ulong = np.uint
else:
    np_long = np.int_
    np_ulong = np.uint


__all__ = [
    "np",
    "_np_version",
    "is_numpy_dev",
]
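A small sketch of why the np_long/np_ulong aliases above exist: under NumPy 2 they resolve to the restored np.long/np.ulong, while on older NumPy they quietly fall back to np.int_/np.uint, so downstream code can spell the dtype one way. Not part of the uploaded file:

import numpy as np
from pandas.compat.numpy import np_long, np_ulong, np_version_gt2

arr = np.arange(3, dtype=np_long)  # works on NumPy 1.x and 2.x alike
print(np_version_gt2, arr.dtype, np.dtype(np_ulong))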
env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.27 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc
ADDED
Binary file (10.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/compat/numpy/function.py
ADDED
@@ -0,0 +1,418 @@
"""
For compatibility with numpy libraries, pandas functions or methods have to
accept '*args' and '**kwargs' parameters to accommodate numpy arguments that
are not actually used or respected in the pandas implementation.

To ensure that users do not abuse these parameters, validation is performed in
'validators.py' to make sure that any extra parameters passed correspond ONLY
to those in the numpy signature. Part of that validation includes whether or
not the user attempted to pass in non-default values for these extraneous
parameters. As we want to discourage users from relying on these parameters
when calling the pandas implementation, we want them only to pass in the
default values for these parameters.

This module provides a set of commonly used default arguments for functions and
methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    TypeVar,
    cast,
    overload,
)

import numpy as np
from numpy import ndarray

from pandas._libs.lib import (
    is_bool,
    is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
    validate_args,
    validate_args_and_kwargs,
    validate_kwargs,
)

if TYPE_CHECKING:
    from pandas._typing import (
        Axis,
        AxisInt,
    )

    AxisNoneT = TypeVar("AxisNoneT", Axis, None)


class CompatValidator:
    def __init__(
        self,
        defaults,
        fname=None,
        method: str | None = None,
        max_fname_arg_count=None,
    ) -> None:
        self.fname = fname
        self.method = method
        self.defaults = defaults
        self.max_fname_arg_count = max_fname_arg_count

    def __call__(
        self,
        args,
        kwargs,
        fname=None,
        max_fname_arg_count=None,
        method: str | None = None,
    ) -> None:
        if not args and not kwargs:
            return None

        fname = self.fname if fname is None else fname
        max_fname_arg_count = (
            self.max_fname_arg_count
            if max_fname_arg_count is None
            else max_fname_arg_count
        )
        method = self.method if method is None else method

        if method == "args":
            validate_args(fname, args, max_fname_arg_count, self.defaults)
        elif method == "kwargs":
            validate_kwargs(fname, kwargs, self.defaults)
        elif method == "both":
            validate_args_and_kwargs(
                fname, args, kwargs, max_fname_arg_count, self.defaults
            )
        else:
            raise ValueError(f"invalid validation method '{method}'")


ARGMINMAX_DEFAULTS = {"out": None}
validate_argmin = CompatValidator(
    ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)
validate_argmax = CompatValidator(
    ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
)


def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
    if isinstance(skipna, ndarray) or skipna is None:
        args = (skipna,) + args
        skipna = True

    return skipna, args


def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
    """
    If 'Series.argmin' is called via the 'numpy' library, the third parameter
    in its signature is 'out', which takes either an ndarray or 'None', so
    check if the 'skipna' parameter is either an instance of ndarray or is
    None, since 'skipna' itself should be a boolean
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmin(args, kwargs)
    return skipna


def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
    """
    If 'Series.argmax' is called via the 'numpy' library, the third parameter
    in its signature is 'out', which takes either an ndarray or 'None', so
    check if the 'skipna' parameter is either an instance of ndarray or is
    None, since 'skipna' itself should be a boolean
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmax(args, kwargs)
    return skipna


ARGSORT_DEFAULTS: dict[str, int | str | None] = {}
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
ARGSORT_DEFAULTS["kind"] = None
ARGSORT_DEFAULTS["stable"] = None


validate_argsort = CompatValidator(
    ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
)

# two different signatures of argsort, this second validation for when the
# `kind` param is supported
ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {}
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
ARGSORT_DEFAULTS_KIND["stable"] = None
validate_argsort_kind = CompatValidator(
    ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
)


def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
    """
    If 'Categorical.argsort' is called via the 'numpy' library, the first
    parameter in its signature is 'axis', which takes either an integer or
    'None', so check if the 'ascending' parameter has either integer type or is
    None, since 'ascending' itself should be a boolean
    """
    if is_integer(ascending) or ascending is None:
        args = (ascending,) + args
        ascending = True

    validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
    ascending = cast(bool, ascending)
    return ascending


CLIP_DEFAULTS: dict[str, Any] = {"out": None}
validate_clip = CompatValidator(
    CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)


@overload
def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
    ...


@overload
def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
    ...


def validate_clip_with_axis(
    axis: ndarray | AxisNoneT, args, kwargs
) -> AxisNoneT | None:
    """
    If 'NDFrame.clip' is called via the numpy library, the third parameter in
    its signature is 'out', which can take an ndarray, so check if the 'axis'
    parameter is an instance of ndarray, since 'axis' itself should either be
    an integer or None
    """
    if isinstance(axis, ndarray):
        args = (axis,) + args
        # error: Incompatible types in assignment (expression has type "None",
        # variable has type "Union[ndarray[Any, Any], str, int]")
        axis = None  # type: ignore[assignment]

    validate_clip(args, kwargs)
    # error: Incompatible return value type (got "Union[ndarray[Any, Any],
    # str, int]", expected "Union[str, int, None]")
    return axis  # type: ignore[return-value]


CUM_FUNC_DEFAULTS: dict[str, Any] = {}
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
    CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
)
validate_cumsum = CompatValidator(
    CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
)


def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
    """
    If this function is called via the 'numpy' library, the third parameter in
    its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so
    check if the 'skipna' parameter is a boolean or not
    """
    if not is_bool(skipna):
        args = (skipna,) + args
        skipna = True
    elif isinstance(skipna, np.bool_):
        skipna = bool(skipna)

    validate_cum_func(args, kwargs, fname=name)
    return skipna


ALLANY_DEFAULTS: dict[str, bool | None] = {}
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
ALLANY_DEFAULTS["axis"] = None
validate_all = CompatValidator(
    ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
)
validate_any = CompatValidator(
    ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
)

LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False}
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")

MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False}
validate_min = CompatValidator(
    MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
)
validate_max = CompatValidator(
    MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)

RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
validate_reshape = CompatValidator(
    RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)

REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
validate_repeat = CompatValidator(
    REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)

ROUND_DEFAULTS: dict[str, Any] = {"out": None}
validate_round = CompatValidator(
    ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)

SORT_DEFAULTS: dict[str, int | str | None] = {}
SORT_DEFAULTS["axis"] = -1
SORT_DEFAULTS["kind"] = "quicksort"
SORT_DEFAULTS["order"] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")

STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None

SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
SUM_DEFAULTS["axis"] = None
SUM_DEFAULTS["keepdims"] = False
SUM_DEFAULTS["initial"] = None

PROD_DEFAULTS = SUM_DEFAULTS.copy()

MEAN_DEFAULTS = SUM_DEFAULTS.copy()

MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
MEDIAN_DEFAULTS["overwrite_input"] = False
MEDIAN_DEFAULTS["keepdims"] = False

STAT_FUNC_DEFAULTS["keepdims"] = False

validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
validate_sum = CompatValidator(
    SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
)
validate_prod = CompatValidator(
    PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
)
validate_mean = CompatValidator(
    MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
)
validate_median = CompatValidator(
    MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)

STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {}
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")

TAKE_DEFAULTS: dict[str, str | None] = {}
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")


def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
    """
    If this function is called via the 'numpy' library, the third parameter in
    its signature is 'axis', which takes either an ndarray or 'None', so check
    if the 'convert' parameter is either an instance of ndarray or is None
    """
    if isinstance(convert, ndarray) or convert is None:
        args = (convert,) + args
        convert = True

    validate_take(args, kwargs, max_fname_arg_count=3, method="both")
    return convert


TRANSPOSE_DEFAULTS = {"axes": None}
validate_transpose = CompatValidator(
    TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
)


def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
    """
    'args' and 'kwargs' should be empty, except for allowed kwargs because all
    of their necessary parameters are explicitly listed in the function
    signature
    """
    if allowed is None:
        allowed = []

    kwargs = set(kwargs) - set(allowed)

    if len(args) + len(kwargs) > 0:
        raise UnsupportedFunctionCall(
            "numpy operations are not valid with groupby. "
            f"Use .groupby(...).{name}() instead"
        )


RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")


def validate_resampler_func(method: str, args, kwargs) -> None:
    """
    'args' and 'kwargs' should be empty because all of their necessary
    parameters are explicitly listed in the function signature
    """
    if len(args) + len(kwargs) > 0:
        if method in RESAMPLER_NUMPY_OPS:
            raise UnsupportedFunctionCall(
                "numpy operations are not valid with resample. "
                f"Use .resample(...).{method}() instead"
            )
        raise TypeError("too many arguments passed in")


def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
    """
    Ensure that the axis argument passed to min, max, argmin, or argmax is zero
    or None, as otherwise it will be incorrectly ignored.

    Parameters
    ----------
    axis : int or None
    ndim : int, default 1

    Raises
    ------
    ValueError
    """
    if axis is None:
        return
    if axis >= ndim or (axis < 0 and ndim + axis < 0):
        raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})")


_validation_funcs = {
    "median": validate_median,
    "mean": validate_mean,
    "min": validate_min,
    "max": validate_max,
    "sum": validate_sum,
    "prod": validate_prod,
}


def validate_func(fname, args, kwargs) -> None:
    if fname not in _validation_funcs:
        return validate_stat_func(args, kwargs, fname=fname)

    validation_func = _validation_funcs[fname]
    return validation_func(args, kwargs)
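For illustration, a minimal sketch of what these validators enforce when numpy forwards its own keyword arguments into pandas reductions; it is not part of the uploaded file, and the exact exception type comes from pandas.util._validators, so the pair caught below is an assumption:

import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])

# np.sum(s) forwards numpy-only defaults (axis=None, dtype=None, out=None),
# which validate_sum accepts because they match SUM_DEFAULTS.
print(np.sum(s))  # 6

# A non-default value for a numpy-only parameter is rejected rather than
# silently ignored.
try:
    s.sum(out=np.empty(1))
except (ValueError, pd.errors.UnsupportedFunctionCall) as err:
    print(type(err).__name__)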
env-llmeval/lib/python3.10/site-packages/pandas/compat/pickle_compat.py
ADDED
@@ -0,0 +1,262 @@
"""
Support pre-0.12 series pickle compatibility.
"""
from __future__ import annotations

import contextlib
import copy
import io
import pickle as pkl
from typing import TYPE_CHECKING

import numpy as np

from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import BaseOffset

from pandas import Index
from pandas.core.arrays import (
    DatetimeArray,
    PeriodArray,
    TimedeltaArray,
)
from pandas.core.internals import BlockManager

if TYPE_CHECKING:
    from collections.abc import Generator


def load_reduce(self) -> None:
    stack = self.stack
    args = stack.pop()
    func = stack[-1]

    try:
        stack[-1] = func(*args)
        return
    except TypeError as err:
        # If we have a deprecated function,
        # try to replace and try again.

        msg = "_reconstruct: First argument must be a sub-type of ndarray"

        if msg in str(err):
            try:
                cls = args[0]
                stack[-1] = object.__new__(cls)
                return
            except TypeError:
                pass
        elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset):
            # TypeError: object.__new__(Day) is not safe, use Day.__new__()
            cls = args[0]
            stack[-1] = cls.__new__(*args)
            return
        elif args and issubclass(args[0], PeriodArray):
            cls = args[0]
            stack[-1] = NDArrayBacked.__new__(*args)
            return

        raise


# If classes are moved, provide compat here.
_class_locations_map = {
    ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
    # 15477
    ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
    # Re-routing unpickle block logic to go through _unpickle_block instead
    # for pandas <= 1.3.5
    ("pandas.core.internals.blocks", "new_block"): (
        "pandas._libs.internals",
        "_unpickle_block",
    ),
    ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
    ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
    # 10890
    ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
    ("pandas.sparse.series", "SparseTimeSeries"): (
        "pandas.core.sparse.series",
        "SparseSeries",
    ),
    # 12588, extensions moving
    ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
    ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
    # 18543 moving period
    ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
    ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
    # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
    ("pandas.tslib", "__nat_unpickle"): (
        "pandas._libs.tslibs.nattype",
        "__nat_unpickle",
    ),
    ("pandas._libs.tslib", "__nat_unpickle"): (
        "pandas._libs.tslibs.nattype",
        "__nat_unpickle",
    ),
    # 15998 top-level dirs moving
    ("pandas.sparse.array", "SparseArray"): (
        "pandas.core.arrays.sparse",
        "SparseArray",
    ),
    ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
    ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
    ("pandas.indexes.numeric", "Int64Index"): (
        "pandas.core.indexes.base",
        "Index",  # updated in 50775
    ),
    ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
    ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
    ("pandas.tseries.index", "_new_DatetimeIndex"): (
        "pandas.core.indexes.datetimes",
        "_new_DatetimeIndex",
    ),
    ("pandas.tseries.index", "DatetimeIndex"): (
        "pandas.core.indexes.datetimes",
        "DatetimeIndex",
    ),
    ("pandas.tseries.period", "PeriodIndex"): (
        "pandas.core.indexes.period",
        "PeriodIndex",
    ),
    # 19269, arrays moving
    ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
    # 19939, add timedeltaindex, float64index compat from 15998 move
    ("pandas.tseries.tdi", "TimedeltaIndex"): (
        "pandas.core.indexes.timedeltas",
        "TimedeltaIndex",
    ),
    ("pandas.indexes.numeric", "Float64Index"): (
        "pandas.core.indexes.base",
        "Index",  # updated in 50775
    ),
    # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
    ("pandas.core.indexes.numeric", "Int64Index"): (
        "pandas.core.indexes.base",
        "Index",
    ),
    ("pandas.core.indexes.numeric", "UInt64Index"): (
        "pandas.core.indexes.base",
        "Index",
    ),
    ("pandas.core.indexes.numeric", "Float64Index"): (
        "pandas.core.indexes.base",
        "Index",
    ),
    ("pandas.core.arrays.sparse.dtype", "SparseDtype"): (
        "pandas.core.dtypes.dtypes",
        "SparseDtype",
    ),
}


# our Unpickler sub-class to override methods and some dispatcher
# functions for compat and uses a non-public class of the pickle module.


class Unpickler(pkl._Unpickler):
    def find_class(self, module, name):
        # override superclass
        key = (module, name)
        module, name = _class_locations_map.get(key, key)
        return super().find_class(module, name)


Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce


def load_newobj(self) -> None:
    args = self.stack.pop()
    cls = self.stack[-1]

    # compat
    if issubclass(cls, Index):
        obj = object.__new__(cls)
    elif issubclass(cls, DatetimeArray) and not args:
        arr = np.array([], dtype="M8[ns]")
        obj = cls.__new__(cls, arr, arr.dtype)
    elif issubclass(cls, TimedeltaArray) and not args:
        arr = np.array([], dtype="m8[ns]")
        obj = cls.__new__(cls, arr, arr.dtype)
    elif cls is BlockManager and not args:
        obj = cls.__new__(cls, (), [], False)
    else:
        obj = cls.__new__(cls, *args)

    self.stack[-1] = obj


Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj


def load_newobj_ex(self) -> None:
    kwargs = self.stack.pop()
    args = self.stack.pop()
    cls = self.stack.pop()

    # compat
    if issubclass(cls, Index):
        obj = object.__new__(cls)
    else:
        obj = cls.__new__(cls, *args, **kwargs)
    self.append(obj)


try:
    Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
    pass


def load(fh, encoding: str | None = None, is_verbose: bool = False):
213 |
+
"""
|
214 |
+
Load a pickle, with a provided encoding,
|
215 |
+
|
216 |
+
Parameters
|
217 |
+
----------
|
218 |
+
fh : a filelike object
|
219 |
+
encoding : an optional encoding
|
220 |
+
is_verbose : show exception output
|
221 |
+
"""
|
222 |
+
try:
|
223 |
+
fh.seek(0)
|
224 |
+
if encoding is not None:
|
225 |
+
up = Unpickler(fh, encoding=encoding)
|
226 |
+
else:
|
227 |
+
up = Unpickler(fh)
|
228 |
+
# "Unpickler" has no attribute "is_verbose" [attr-defined]
|
229 |
+
up.is_verbose = is_verbose # type: ignore[attr-defined]
|
230 |
+
|
231 |
+
return up.load()
|
232 |
+
except (ValueError, TypeError):
|
233 |
+
raise
|
234 |
+
|
235 |
+
|
236 |
+
def loads(
|
237 |
+
bytes_object: bytes,
|
238 |
+
*,
|
239 |
+
fix_imports: bool = True,
|
240 |
+
encoding: str = "ASCII",
|
241 |
+
errors: str = "strict",
|
242 |
+
):
|
243 |
+
"""
|
244 |
+
Analogous to pickle._loads.
|
245 |
+
"""
|
246 |
+
fd = io.BytesIO(bytes_object)
|
247 |
+
return Unpickler(
|
248 |
+
fd, fix_imports=fix_imports, encoding=encoding, errors=errors
|
249 |
+
).load()
|
250 |
+
|
251 |
+
|
252 |
+
@contextlib.contextmanager
|
253 |
+
def patch_pickle() -> Generator[None, None, None]:
|
254 |
+
"""
|
255 |
+
Temporarily patch pickle to use our unpickler.
|
256 |
+
"""
|
257 |
+
orig_loads = pkl.loads
|
258 |
+
try:
|
259 |
+
setattr(pkl, "loads", loads)
|
260 |
+
yield
|
261 |
+
finally:
|
262 |
+
setattr(pkl, "loads", orig_loads)
|
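The compat module above maps legacy pickle class locations onto their current homes and swaps in a custom Unpickler. A minimal sketch of how the patch_pickle context manager could be exercised to read a pickle written by an older pandas; the legacy.pkl path is illustrative and not part of the source:

import pickle

from pandas.compat.pickle_compat import patch_pickle

# Hypothetical file written by an older pandas version (path is illustrative).
with open("legacy.pkl", "rb") as fh:
    payload = fh.read()

# While the context manager is active, pickle.loads is replaced by the compat
# loads() above, so moved classes are resolved via _class_locations_map.
with patch_pickle():
    obj = pickle.loads(payload)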
env-llmeval/lib/python3.10/site-packages/pandas/compat/pyarrow.py
ADDED
@@ -0,0 +1,27 @@
""" support pyarrow compatibility across versions """

from __future__ import annotations

from pandas.util.version import Version

try:
    import pyarrow as pa

    _palv = Version(Version(pa.__version__).base_version)
    pa_version_under10p1 = _palv < Version("10.0.1")
    pa_version_under11p0 = _palv < Version("11.0.0")
    pa_version_under12p0 = _palv < Version("12.0.0")
    pa_version_under13p0 = _palv < Version("13.0.0")
    pa_version_under14p0 = _palv < Version("14.0.0")
    pa_version_under14p1 = _palv < Version("14.0.1")
    pa_version_under15p0 = _palv < Version("15.0.0")
    pa_version_under16p0 = _palv < Version("16.0.0")
except ImportError:
    pa_version_under10p1 = True
    pa_version_under11p0 = True
    pa_version_under12p0 = True
    pa_version_under13p0 = True
    pa_version_under14p0 = True
    pa_version_under14p1 = True
    pa_version_under15p0 = True
    pa_version_under16p0 = True
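A small sketch of how these version flags are typically consumed; the test name is illustrative. As the except branch above shows, each flag is True both when pyarrow is older than the stated version and when pyarrow is not installed at all:

import pytest

from pandas.compat import pa_version_under11p0


@pytest.mark.skipif(pa_version_under11p0, reason="requires pyarrow>=11.0.0")
def test_needs_recent_pyarrow():
    # Only runs when a sufficiently new pyarrow is importable.
    import pyarrow as pa

    assert pa.list_(pa.int64()) is not None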
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (184 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc
ADDED
Binary file (10.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_arithmetic.cpython-310.pyc
ADDED
Binary file (30.3 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_constructors.cpython-310.pyc
ADDED
Binary file (78.4 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_cumulative.cpython-310.pyc
ADDED
Binary file (4.45 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_formats.cpython-310.pyc
ADDED
Binary file (18.7 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_iteration.cpython-310.pyc
ADDED
Binary file (1.63 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_logical_ops.cpython-310.pyc
ADDED
Binary file (14.7 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_missing.cpython-310.pyc
ADDED
Binary file (3.71 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_npfuncs.cpython-310.pyc
ADDED
Binary file (1.75 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_reductions.cpython-310.pyc
ADDED
Binary file (6.81 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_subclass.cpython-310.pyc
ADDED
Binary file (3.57 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_ufunc.cpython-310.pyc
ADDED
Binary file (11.8 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_unary.cpython-310.pyc
ADDED
Binary file (1.94 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_validate.cpython-310.pyc
ADDED
Binary file (857 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (194 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc
ADDED
Binary file (8.63 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc
ADDED
Binary file (24.1 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc
ADDED
Binary file (3.82 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc
ADDED
Binary file (787 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc
ADDED
Binary file (1.51 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc
ADDED
Binary file (4.05 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py
ADDED
@@ -0,0 +1,258 @@
import numpy as np
import pytest

from pandas import (
    Categorical,
    DataFrame,
    Index,
    Series,
    Timestamp,
    date_range,
    period_range,
    timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.indexes.accessors import Properties


class TestCatAccessor:
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.cat.set_categories([1, 2, 3]),
            lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
            lambda x: x.cat.rename_categories([1, 2, 3]),
            lambda x: x.cat.remove_unused_categories(),
            lambda x: x.cat.remove_categories([2]),
            lambda x: x.cat.add_categories([4]),
            lambda x: x.cat.as_ordered(),
            lambda x: x.cat.as_unordered(),
        ],
    )
    def test_getname_categorical_accessor(self, method):
        # GH#17509
        ser = Series([1, 2, 3], name="A").astype("category")
        expected = "A"
        result = method(ser).name
        assert result == expected

    def test_cat_accessor(self):
        ser = Series(Categorical(["a", "b", np.nan, "a"]))
        tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
        assert not ser.cat.ordered, False

        exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])

        res = ser.cat.set_categories(["b", "a"])
        tm.assert_categorical_equal(res.values, exp)

        ser[:] = "a"
        ser = ser.cat.remove_unused_categories()
        tm.assert_index_equal(ser.cat.categories, Index(["a"]))

    def test_cat_accessor_api(self):
        # GH#9322

        assert Series.cat is CategoricalAccessor
        ser = Series(list("aabbcde")).astype("category")
        assert isinstance(ser.cat, CategoricalAccessor)

        invalid = Series([1])
        with pytest.raises(AttributeError, match="only use .cat accessor"):
            invalid.cat
        assert not hasattr(invalid, "cat")

    def test_cat_accessor_no_new_attributes(self):
        # https://github.com/pandas-dev/pandas/issues/10673
        cat = Series(list("aabbcde")).astype("category")
        with pytest.raises(AttributeError, match="You cannot add any new attribute"):
            cat.cat.xlabel = "a"

    def test_categorical_delegations(self):
        # invalid accessor
        msg = r"Can only use \.cat accessor with a 'category' dtype"
        with pytest.raises(AttributeError, match=msg):
            Series([1, 2, 3]).cat
        with pytest.raises(AttributeError, match=msg):
            Series([1, 2, 3]).cat()
        with pytest.raises(AttributeError, match=msg):
            Series(["a", "b", "c"]).cat
        with pytest.raises(AttributeError, match=msg):
            Series(np.arange(5.0)).cat
        with pytest.raises(AttributeError, match=msg):
            Series([Timestamp("20130101")]).cat

        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' 'drop_unused_categories()' to the
        # categorical
        ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = Index(["a", "b", "c"])
        tm.assert_index_equal(ser.cat.categories, exp_categories)
        ser = ser.cat.rename_categories([1, 2, 3])
        exp_categories = Index([1, 2, 3])
        tm.assert_index_equal(ser.cat.categories, exp_categories)

        exp_codes = Series([0, 1, 2, 0], dtype="int8")
        tm.assert_series_equal(ser.cat.codes, exp_codes)

        assert ser.cat.ordered
        ser = ser.cat.as_unordered()
        assert not ser.cat.ordered

        ser = ser.cat.as_ordered()
        assert ser.cat.ordered

        # reorder
        ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = Index(["c", "b", "a"])
        exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
        ser = ser.cat.set_categories(["c", "b", "a"])
        tm.assert_index_equal(ser.cat.categories, exp_categories)
        tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
        tm.assert_numpy_array_equal(ser.__array__(), exp_values)

        # remove unused categories
        ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
        exp_categories = Index(["a", "b"])
        exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
        ser = ser.cat.remove_unused_categories()
        tm.assert_index_equal(ser.cat.categories, exp_categories)
        tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)
        tm.assert_numpy_array_equal(ser.__array__(), exp_values)

        # This method is likely to be confused, so test that it raises an error
        # on wrong inputs:
        msg = "'Series' object has no attribute 'set_categories'"
        with pytest.raises(AttributeError, match=msg):
            ser.set_categories([4, 3, 2, 1])

        # right: ser.cat.set_categories([4,3,2,1])

        # GH#18862 (let Series.cat.rename_categories take callables)
        ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        result = ser.cat.rename_categories(lambda x: x.upper())
        expected = Series(
            Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
        )
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "idx",
        [
            date_range("1/1/2015", periods=5),
            date_range("1/1/2015", periods=5, tz="MET"),
            period_range("1/1/2015", freq="D", periods=5),
            timedelta_range("1 days", "10 days"),
        ],
    )
    def test_dt_accessor_api_for_categorical(self, idx):
        # https://github.com/pandas-dev/pandas/issues/10661

        ser = Series(idx)
        cat = ser.astype("category")

        # only testing field (like .day)
        # and bool (is_month_start)
        attr_names = type(ser._values)._datetimelike_ops

        assert isinstance(cat.dt, Properties)

        special_func_defs = [
            ("strftime", ("%Y-%m-%d",), {}),
            ("round", ("D",), {}),
            ("floor", ("D",), {}),
            ("ceil", ("D",), {}),
            ("asfreq", ("D",), {}),
            ("as_unit", ("s"), {}),
        ]
        if idx.dtype == "M8[ns]":
            # exclude dt64tz since that is already localized and would raise
            tup = ("tz_localize", ("UTC",), {})
            special_func_defs.append(tup)
        elif idx.dtype.kind == "M":
            # exclude dt64 since that is not localized so would raise
            tup = ("tz_convert", ("EST",), {})
            special_func_defs.append(tup)

        _special_func_names = [f[0] for f in special_func_defs]

        _ignore_names = ["components", "tz_localize", "tz_convert"]

        func_names = [
            fname
            for fname in dir(ser.dt)
            if not (
                fname.startswith("_")
                or fname in attr_names
                or fname in _special_func_names
                or fname in _ignore_names
            )
        ]

        func_defs = [(fname, (), {}) for fname in func_names]
        func_defs.extend(
            f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt)
        )

        for func, args, kwargs in func_defs:
            warn_cls = []
            if func == "to_period" and getattr(idx, "tz", None) is not None:
                # dropping TZ
                warn_cls.append(UserWarning)
            if func == "to_pydatetime":
                # deprecated to return Index[object]
                warn_cls.append(FutureWarning)
            if warn_cls:
                warn_cls = tuple(warn_cls)
            else:
                warn_cls = None
            with tm.assert_produces_warning(warn_cls):
                res = getattr(cat.dt, func)(*args, **kwargs)
                exp = getattr(ser.dt, func)(*args, **kwargs)

            tm.assert_equal(res, exp)

        for attr in attr_names:
            res = getattr(cat.dt, attr)
            exp = getattr(ser.dt, attr)

            tm.assert_equal(res, exp)

    def test_dt_accessor_api_for_categorical_invalid(self):
        invalid = Series([1, 2, 3]).astype("category")
        msg = "Can only use .dt accessor with datetimelike"

        with pytest.raises(AttributeError, match=msg):
            invalid.dt
        assert not hasattr(invalid, "str")

    def test_set_categories_setitem(self):
        # GH#43334

        df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category")

        df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"])
        df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"])

        # values should not be coerced to NaN
        assert list(df["Sex"]) == ["female", "male", "male"]
        assert list(df["Survived"]) == ["Yes", "No", "Yes"]

        df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False)
        df["Survived"] = Categorical(
            df["Survived"], categories=["No", "Yes"], ordered=False
        )

        # values should not be coerced to NaN
        assert list(df["Sex"]) == ["female", "male", "male"]
        assert list(df["Survived"]) == ["Yes", "No", "Yes"]

    def test_categorical_of_booleans_is_boolean(self):
        # https://github.com/pandas-dev/pandas/issues/46313
        df = DataFrame(
            {"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category"
        )
        value = df["bool_cat"].cat.categories.dtype
        expected = np.dtype(np.bool_)
        assert value is expected
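For orientation, a short self-contained sketch of the .cat delegation behavior these tests exercise; the values are illustrative only:

import pandas as pd

ser = pd.Series(["a", "b", "b", "a"], name="letters").astype("category")

# The accessor delegates to the underlying Categorical.
print(list(ser.cat.categories))      # ['a', 'b']
print(ser.cat.codes.tolist())        # [0, 1, 1, 0]

# rename_categories also accepts a callable (GH#18862).
renamed = ser.cat.rename_categories(str.upper)
print(list(renamed.cat.categories))  # ['A', 'B']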
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_dt_accessor.py
ADDED
@@ -0,0 +1,843 @@
import calendar
from datetime import (
    date,
    datetime,
    time,
)
import locale
import unicodedata

import numpy as np
import pytest
import pytz

from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.errors import SettingWithCopyError

from pandas.core.dtypes.common import (
    is_integer_dtype,
    is_list_like,
)

import pandas as pd
from pandas import (
    DataFrame,
    DatetimeIndex,
    Index,
    Period,
    PeriodIndex,
    Series,
    TimedeltaIndex,
    date_range,
    period_range,
    timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
    DatetimeArray,
    PeriodArray,
    TimedeltaArray,
)

ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"]
ok_for_dt = DatetimeArray._datetimelike_ops
ok_for_dt_methods = [
    "to_period",
    "to_pydatetime",
    "tz_localize",
    "tz_convert",
    "normalize",
    "strftime",
    "round",
    "floor",
    "ceil",
    "day_name",
    "month_name",
    "isocalendar",
    "as_unit",
]
ok_for_td = TimedeltaArray._datetimelike_ops
ok_for_td_methods = [
    "components",
    "to_pytimedelta",
    "total_seconds",
    "round",
    "floor",
    "ceil",
    "as_unit",
]


def get_dir(ser):
    # check limited display api
    results = [r for r in ser.dt.__dir__() if not r.startswith("_")]
    return sorted(set(results))


class TestSeriesDatetimeValues:
    def _compare(self, ser, name):
        # GH 7207, 11128
        # test .dt namespace accessor

        def get_expected(ser, prop):
            result = getattr(Index(ser._values), prop)
            if isinstance(result, np.ndarray):
                if is_integer_dtype(result):
                    result = result.astype("int64")
            elif not is_list_like(result) or isinstance(result, DataFrame):
                return result
            return Series(result, index=ser.index, name=ser.name)

        left = getattr(ser.dt, name)
        right = get_expected(ser, name)
        if not (is_list_like(left) and is_list_like(right)):
            assert left == right
        elif isinstance(left, DataFrame):
            tm.assert_frame_equal(left, right)
        else:
            tm.assert_series_equal(left, right)

    @pytest.mark.parametrize("freq", ["D", "s", "ms"])
    def test_dt_namespace_accessor_datetime64(self, freq):
        # GH#7207, GH#11128
        # test .dt namespace accessor

        # datetimeindex
        dti = date_range("20130101", periods=5, freq=freq)
        ser = Series(dti, name="xxx")

        for prop in ok_for_dt:
            # we test freq below
            if prop != "freq":
                self._compare(ser, prop)

        for prop in ok_for_dt_methods:
            getattr(ser.dt, prop)

        msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.dt.to_pydatetime()
        assert isinstance(result, np.ndarray)
        assert result.dtype == object

        result = ser.dt.tz_localize("US/Eastern")
        exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern")
        expected = Series(exp_values, index=ser.index, name="xxx")
        tm.assert_series_equal(result, expected)

        tz_result = result.dt.tz
        assert str(tz_result) == "US/Eastern"
        freq_result = ser.dt.freq
        assert freq_result == DatetimeIndex(ser.values, freq="infer").freq

        # let's localize, then convert
        result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")
        exp_values = (
            DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern")
        )
        expected = Series(exp_values, index=ser.index, name="xxx")
        tm.assert_series_equal(result, expected)

    def test_dt_namespace_accessor_datetime64tz(self):
        # GH#7207, GH#11128
        # test .dt namespace accessor

        # datetimeindex with tz
        dti = date_range("20130101", periods=5, tz="US/Eastern")
        ser = Series(dti, name="xxx")
        for prop in ok_for_dt:
            # we test freq below
            if prop != "freq":
                self._compare(ser, prop)

        for prop in ok_for_dt_methods:
            getattr(ser.dt, prop)

        msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.dt.to_pydatetime()
        assert isinstance(result, np.ndarray)
        assert result.dtype == object

        result = ser.dt.tz_convert("CET")
        expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx")
        tm.assert_series_equal(result, expected)

        tz_result = result.dt.tz
        assert str(tz_result) == "CET"
        freq_result = ser.dt.freq
        assert freq_result == DatetimeIndex(ser.values, freq="infer").freq

    def test_dt_namespace_accessor_timedelta(self):
        # GH#7207, GH#11128
        # test .dt namespace accessor

        # timedelta index
        cases = [
            Series(
                timedelta_range("1 day", periods=5), index=list("abcde"), name="xxx"
            ),
            Series(timedelta_range("1 day 01:23:45", periods=5, freq="s"), name="xxx"),
            Series(
                timedelta_range("2 days 01:23:45.012345", periods=5, freq="ms"),
                name="xxx",
            ),
        ]
        for ser in cases:
            for prop in ok_for_td:
                # we test freq below
                if prop != "freq":
                    self._compare(ser, prop)

            for prop in ok_for_td_methods:
                getattr(ser.dt, prop)

            result = ser.dt.components
            assert isinstance(result, DataFrame)
            tm.assert_index_equal(result.index, ser.index)

            result = ser.dt.to_pytimedelta()
            assert isinstance(result, np.ndarray)
            assert result.dtype == object

            result = ser.dt.total_seconds()
            assert isinstance(result, Series)
            assert result.dtype == "float64"

            freq_result = ser.dt.freq
            assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq

    def test_dt_namespace_accessor_period(self):
        # GH#7207, GH#11128
        # test .dt namespace accessor

        # periodindex
        pi = period_range("20130101", periods=5, freq="D")
        ser = Series(pi, name="xxx")

        for prop in ok_for_period:
            # we test freq below
            if prop != "freq":
                self._compare(ser, prop)

        for prop in ok_for_period_methods:
            getattr(ser.dt, prop)

        freq_result = ser.dt.freq
        assert freq_result == PeriodIndex(ser.values).freq

    def test_dt_namespace_accessor_index_and_values(self):
        # both
        index = date_range("20130101", periods=3, freq="D")
        dti = date_range("20140204", periods=3, freq="s")
        ser = Series(dti, index=index, name="xxx")
        exp = Series(
            np.array([2014, 2014, 2014], dtype="int32"), index=index, name="xxx"
        )
        tm.assert_series_equal(ser.dt.year, exp)

        exp = Series(np.array([2, 2, 2], dtype="int32"), index=index, name="xxx")
        tm.assert_series_equal(ser.dt.month, exp)

        exp = Series(np.array([0, 1, 2], dtype="int32"), index=index, name="xxx")
        tm.assert_series_equal(ser.dt.second, exp)

        exp = Series([ser.iloc[0]] * 3, index=index, name="xxx")
        tm.assert_series_equal(ser.dt.normalize(), exp)

    def test_dt_accessor_limited_display_api(self):
        # tznaive
        ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
        results = get_dir(ser)
        tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))

        # tzaware
        ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")
        ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")
        results = get_dir(ser)
        tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))

        # Period
        idx = period_range("20130101", periods=5, freq="D", name="xxx").astype(object)
        with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
            ser = Series(idx)
        results = get_dir(ser)
        tm.assert_almost_equal(
            results, sorted(set(ok_for_period + ok_for_period_methods))
        )

    def test_dt_accessor_ambiguous_freq_conversions(self):
        # GH#11295
        # ambiguous time error on the conversions
        ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")
        ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")

        exp_values = date_range(
            "2015-01-01", "2016-01-01", freq="min", tz="UTC"
        ).tz_convert("America/Chicago")
        # freq not preserved by tz_localize above
        exp_values = exp_values._with_freq(None)
        expected = Series(exp_values, name="xxx")
        tm.assert_series_equal(ser, expected)

    def test_dt_accessor_not_writeable(self, using_copy_on_write, warn_copy_on_write):
        # no setting allowed
        ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
        with pytest.raises(ValueError, match="modifications"):
            ser.dt.hour = 5

        # trying to set a copy
        msg = "modifications to a property of a datetimelike.+not supported"
        with pd.option_context("chained_assignment", "raise"):
            if using_copy_on_write:
                with tm.raises_chained_assignment_error():
                    ser.dt.hour[0] = 5
            elif warn_copy_on_write:
                with tm.assert_produces_warning(
                    FutureWarning, match="ChainedAssignmentError"
                ):
                    ser.dt.hour[0] = 5
            else:
                with pytest.raises(SettingWithCopyError, match=msg):
                    ser.dt.hour[0] = 5

    @pytest.mark.parametrize(
        "method, dates",
        [
            ["round", ["2012-01-02", "2012-01-02", "2012-01-01"]],
            ["floor", ["2012-01-01", "2012-01-01", "2012-01-01"]],
            ["ceil", ["2012-01-02", "2012-01-02", "2012-01-02"]],
        ],
    )
    def test_dt_round(self, method, dates):
        # round
        ser = Series(
            pd.to_datetime(
                ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]
            ),
            name="xxx",
        )
        result = getattr(ser.dt, method)("D")
        expected = Series(pd.to_datetime(dates), name="xxx")
        tm.assert_series_equal(result, expected)

    def test_dt_round_tz(self):
        ser = Series(
            pd.to_datetime(
                ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]
            ),
            name="xxx",
        )
        result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D")

        exp_values = pd.to_datetime(
            ["2012-01-01", "2012-01-01", "2012-01-01"]
        ).tz_localize("US/Eastern")
        expected = Series(exp_values, name="xxx")
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("method", ["ceil", "round", "floor"])
    def test_dt_round_tz_ambiguous(self, method):
        # GH 18946 round near "fall back" DST
        df1 = DataFrame(
            [
                pd.to_datetime("2017-10-29 02:00:00+02:00", utc=True),
                pd.to_datetime("2017-10-29 02:00:00+01:00", utc=True),
                pd.to_datetime("2017-10-29 03:00:00+01:00", utc=True),
            ],
            columns=["date"],
        )
        df1["date"] = df1["date"].dt.tz_convert("Europe/Madrid")
        # infer
        result = getattr(df1.date.dt, method)("h", ambiguous="infer")
        expected = df1["date"]
        tm.assert_series_equal(result, expected)

        # bool-array
        result = getattr(df1.date.dt, method)("h", ambiguous=[True, False, False])
        tm.assert_series_equal(result, expected)

        # NaT
        result = getattr(df1.date.dt, method)("h", ambiguous="NaT")
        expected = df1["date"].copy()
        expected.iloc[0:2] = pd.NaT
        tm.assert_series_equal(result, expected)

        # raise
        with tm.external_error_raised(pytz.AmbiguousTimeError):
            getattr(df1.date.dt, method)("h", ambiguous="raise")

    @pytest.mark.parametrize(
        "method, ts_str, freq",
        [
            ["ceil", "2018-03-11 01:59:00-0600", "5min"],
            ["round", "2018-03-11 01:59:00-0600", "5min"],
            ["floor", "2018-03-11 03:01:00-0500", "2h"],
        ],
    )
    def test_dt_round_tz_nonexistent(self, method, ts_str, freq):
        # GH 23324 round near "spring forward" DST
        ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")])
        result = getattr(ser.dt, method)(freq, nonexistent="shift_forward")
        expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")])
        tm.assert_series_equal(result, expected)

        result = getattr(ser.dt, method)(freq, nonexistent="NaT")
        expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz)
        tm.assert_series_equal(result, expected)

        with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"):
            getattr(ser.dt, method)(freq, nonexistent="raise")

    @pytest.mark.parametrize("freq", ["ns", "us", "1000us"])
    def test_dt_round_nonnano_higher_resolution_no_op(self, freq):
        # GH 52761
        ser = Series(
            ["2020-05-31 08:00:00", "2000-12-31 04:00:05", "1800-03-14 07:30:20"],
            dtype="datetime64[ms]",
        )
        expected = ser.copy()
        result = ser.dt.round(freq)
        tm.assert_series_equal(result, expected)

        assert not np.shares_memory(ser.array._ndarray, result.array._ndarray)

    def test_dt_namespace_accessor_categorical(self):
        # GH 19468
        dti = DatetimeIndex(["20171111", "20181212"]).repeat(2)
        ser = Series(pd.Categorical(dti), name="foo")
        result = ser.dt.year
        expected = Series([2017, 2017, 2018, 2018], dtype="int32", name="foo")
        tm.assert_series_equal(result, expected)

    def test_dt_tz_localize_categorical(self, tz_aware_fixture):
        # GH 27952
        tz = tz_aware_fixture
        datetimes = Series(
            ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns]"
        )
        categorical = datetimes.astype("category")
        result = categorical.dt.tz_localize(tz)
        expected = datetimes.dt.tz_localize(tz)
        tm.assert_series_equal(result, expected)

    def test_dt_tz_convert_categorical(self, tz_aware_fixture):
        # GH 27952
        tz = tz_aware_fixture
        datetimes = Series(
            ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns, MET]"
        )
        categorical = datetimes.astype("category")
        result = categorical.dt.tz_convert(tz)
        expected = datetimes.dt.tz_convert(tz)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("accessor", ["year", "month", "day"])
    def test_dt_other_accessors_categorical(self, accessor):
        # GH 27952
        datetimes = Series(
            ["2018-01-01", "2018-01-01", "2019-01-02"], dtype="datetime64[ns]"
        )
        categorical = datetimes.astype("category")
        result = getattr(categorical.dt, accessor)
        expected = getattr(datetimes.dt, accessor)
        tm.assert_series_equal(result, expected)

    def test_dt_accessor_no_new_attributes(self):
        # https://github.com/pandas-dev/pandas/issues/10673
        ser = Series(date_range("20130101", periods=5, freq="D"))
        with pytest.raises(AttributeError, match="You cannot add any new attribute"):
            ser.dt.xlabel = "a"

    # error: Unsupported operand types for + ("List[None]" and "List[str]")
    @pytest.mark.parametrize(
        "time_locale", [None] + tm.get_locales()  # type: ignore[operator]
    )
    def test_dt_accessor_datetime_name_accessors(self, time_locale):
        # Test Monday -> Sunday and January -> December, in that sequence
        if time_locale is None:
            # If the time_locale is None, day-name and month_name should
            # return the english attributes
            expected_days = [
                "Monday",
                "Tuesday",
                "Wednesday",
                "Thursday",
                "Friday",
                "Saturday",
                "Sunday",
            ]
            expected_months = [
                "January",
                "February",
                "March",
                "April",
                "May",
                "June",
                "July",
                "August",
                "September",
                "October",
                "November",
                "December",
            ]
        else:
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_days = calendar.day_name[:]
                expected_months = calendar.month_name[1:]

        ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365))
        english_days = [
            "Monday",
            "Tuesday",
            "Wednesday",
            "Thursday",
            "Friday",
            "Saturday",
            "Sunday",
        ]
        for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
            name = name.capitalize()
            assert ser.dt.day_name(locale=time_locale)[day] == name
            assert ser.dt.day_name(locale=None)[day] == eng_name
        ser = pd.concat([ser, Series([pd.NaT])])
        assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1])

        ser = Series(date_range(freq="ME", start="2012", end="2013"))
        result = ser.dt.month_name(locale=time_locale)
        expected = Series([month.capitalize() for month in expected_months])

        # work around https://github.com/pandas-dev/pandas/issues/22342
        result = result.str.normalize("NFD")
        expected = expected.str.normalize("NFD")

        tm.assert_series_equal(result, expected)

        for s_date, expected in zip(ser, expected_months):
            result = s_date.month_name(locale=time_locale)
            expected = expected.capitalize()

            result = unicodedata.normalize("NFD", result)
            expected = unicodedata.normalize("NFD", expected)

            assert result == expected

        ser = pd.concat([ser, Series([pd.NaT])])
        assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1])

    def test_strftime(self):
        # GH 10086
        ser = Series(date_range("20130101", periods=5))
        result = ser.dt.strftime("%Y/%m/%d")
        expected = Series(
            ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
        )
        tm.assert_series_equal(result, expected)

        ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5))
        result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")
        expected = Series(
            [
                "2015/02/03 11-22-33",
                "2015/02/04 11-22-33",
                "2015/02/05 11-22-33",
                "2015/02/06 11-22-33",
                "2015/02/07 11-22-33",
            ]
        )
        tm.assert_series_equal(result, expected)

        ser = Series(period_range("20130101", periods=5))
        result = ser.dt.strftime("%Y/%m/%d")
        expected = Series(
            ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
        )
        tm.assert_series_equal(result, expected)

        ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s"))
        result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")
        expected = Series(
            [
                "2015/02/03 11-22-33",
                "2015/02/03 11-22-34",
                "2015/02/03 11-22-35",
                "2015/02/03 11-22-36",
                "2015/02/03 11-22-37",
            ]
        )
        tm.assert_series_equal(result, expected)

    def test_strftime_dt64_days(self):
        ser = Series(date_range("20130101", periods=5))
        ser.iloc[0] = pd.NaT
        result = ser.dt.strftime("%Y/%m/%d")
        expected = Series(
            [np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]
        )
        tm.assert_series_equal(result, expected)

        datetime_index = date_range("20150301", periods=5)
        result = datetime_index.strftime("%Y/%m/%d")

        expected = Index(
            ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],
            dtype=np.object_,
        )
        # dtype may be S10 or U10 depending on python version
        tm.assert_index_equal(result, expected)

    def test_strftime_period_days(self, using_infer_string):
        period_index = period_range("20150301", periods=5)
        result = period_index.strftime("%Y/%m/%d")
        expected = Index(
            ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],
            dtype="=U10",
        )
        if using_infer_string:
            expected = expected.astype("string[pyarrow_numpy]")
        tm.assert_index_equal(result, expected)

    def test_strftime_dt64_microsecond_resolution(self):
        ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])
        result = ser.dt.strftime("%Y-%m-%d %H:%M:%S")
        expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
        tm.assert_series_equal(result, expected)

    def test_strftime_period_hours(self):
        ser = Series(period_range("20130101", periods=4, freq="h"))
        result = ser.dt.strftime("%Y/%m/%d %H:%M:%S")
        expected = Series(
            [
                "2013/01/01 00:00:00",
                "2013/01/01 01:00:00",
                "2013/01/01 02:00:00",
                "2013/01/01 03:00:00",
            ]
        )
        tm.assert_series_equal(result, expected)

    def test_strftime_period_minutes(self):
        ser = Series(period_range("20130101", periods=4, freq="ms"))
        result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l")
        expected = Series(
            [
                "2013/01/01 00:00:00.000",
                "2013/01/01 00:00:00.001",
                "2013/01/01 00:00:00.002",
                "2013/01/01 00:00:00.003",
            ]
        )
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "data",
        [
            DatetimeIndex(["2019-01-01", pd.NaT]),
            PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"),
        ],
    )
    def test_strftime_nat(self, data):
        # GH 29578
        ser = Series(data)
        result = ser.dt.strftime("%Y-%m-%d")
        expected = Series(["2019-01-01", np.nan])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "data", [DatetimeIndex([pd.NaT]), PeriodIndex([pd.NaT], dtype="period[D]")]
    )
    def test_strftime_all_nat(self, data):
        # https://github.com/pandas-dev/pandas/issues/45858
        ser = Series(data)
        with tm.assert_produces_warning(None):
            result = ser.dt.strftime("%Y-%m-%d")
        expected = Series([np.nan], dtype=object)
        tm.assert_series_equal(result, expected)

    def test_valid_dt_with_missing_values(self):
        # GH 8689
        ser = Series(date_range("20130101", periods=5, freq="D"))
        ser.iloc[2] = pd.NaT

        for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]:
            expected = getattr(ser.dt, attr).copy()
            expected.iloc[2] = np.nan
            result = getattr(ser.dt, attr)
            tm.assert_series_equal(result, expected)

        result = ser.dt.date
        expected = Series(
            [
                date(2013, 1, 1),
                date(2013, 1, 2),
                pd.NaT,
                date(2013, 1, 4),
                date(2013, 1, 5),
            ],
            dtype="object",
        )
        tm.assert_series_equal(result, expected)

        result = ser.dt.time
        expected = Series([time(0), time(0), pd.NaT, time(0), time(0)], dtype="object")
        tm.assert_series_equal(result, expected)

    def test_dt_accessor_api(self):
        # GH 9322
        from pandas.core.indexes.accessors import (
            CombinedDatetimelikeProperties,
            DatetimeProperties,
        )

        assert Series.dt is CombinedDatetimelikeProperties

        ser = Series(date_range("2000-01-01", periods=3))
        assert isinstance(ser.dt, DatetimeProperties)

    @pytest.mark.parametrize(
        "ser",
        [
            Series(np.arange(5)),
            Series(list("abcde")),
            Series(np.random.default_rng(2).standard_normal(5)),
        ],
    )
    def test_dt_accessor_invalid(self, ser):
        # GH#9322 check that series with incorrect dtypes don't have attr
        with pytest.raises(AttributeError, match="only use .dt accessor"):
            ser.dt
        assert not hasattr(ser, "dt")

    def test_dt_accessor_updates_on_inplace(self):
        ser = Series(date_range("2018-01-01", periods=10))
        ser[2] = None
        return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True)
        assert return_value is None
        result = ser.dt.date
        assert result[0] == result[2]

    def test_date_tz(self):
        # GH11757
        rng = DatetimeIndex(
            ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"],
            tz="US/Eastern",
        )
        ser = Series(rng)
        expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)])
        tm.assert_series_equal(ser.dt.date, expected)
        tm.assert_series_equal(ser.apply(lambda x: x.date()), expected)

    def test_dt_timetz_accessor(self, tz_naive_fixture):
        # GH21358
        tz = maybe_get_tz(tz_naive_fixture)

        dtindex = DatetimeIndex(
            ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz
        )
        ser = Series(dtindex)
        expected = Series(
            [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)]
        )
        result = ser.dt.timetz
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "input_series, expected_output",
        [
            [["2020-01-01"], [[2020, 1, 3]]],
            [[pd.NaT], [[np.nan, np.nan, np.nan]]],
            [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]],
            [["2010-01-01", pd.NaT], [[2009, 53, 5], [np.nan, np.nan, np.nan]]],
            # see GH#36032
            [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]],
            [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]],
        ],
    )
    def test_isocalendar(self, input_series, expected_output):
        result = pd.to_datetime(Series(input_series)).dt.isocalendar()
        expected_frame = DataFrame(
            expected_output, columns=["year", "week", "day"], dtype="UInt32"
        )
        tm.assert_frame_equal(result, expected_frame)

    def test_hour_index(self):
        dt_series = Series(
            date_range(start="2021-01-01", periods=5, freq="h"),
            index=[2, 6, 7, 8, 11],
            dtype="category",
        )
        result = dt_series.dt.hour
        expected = Series(
            [0, 1, 2, 3, 4],
            dtype="int32",
            index=[2, 6, 7, 8, 11],
        )
        tm.assert_series_equal(result, expected)


class TestSeriesPeriodValuesDtAccessor:
    @pytest.mark.parametrize(
        "input_vals",
        [
            [Period("2016-01", freq="M"), Period("2016-02", freq="M")],
            [Period("2016-01-01", freq="D"), Period("2016-01-02", freq="D")],
            [
                Period("2016-01-01 00:00:00", freq="h"),
                Period("2016-01-01 01:00:00", freq="h"),
            ],
            [
                Period("2016-01-01 00:00:00", freq="M"),
                Period("2016-01-01 00:01:00", freq="M"),
            ],
            [
                Period("2016-01-01 00:00:00", freq="s"),
                Period("2016-01-01 00:00:01", freq="s"),
            ],
        ],
    )
    def test_end_time_timevalues(self, input_vals):
        # GH#17157
        # Check that the time part of the Period is adjusted by end_time
        # when using the dt accessor on a Series
        input_vals = PeriodArray._from_sequence(np.asarray(input_vals))

        ser = Series(input_vals)
        result = ser.dt.end_time
        expected = ser.apply(lambda x: x.end_time)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")])
    def test_to_period(self, input_vals):
        # GH#21205
        expected = Series([input_vals], dtype="Period[D]")
        result = Series([input_vals], dtype="datetime64[ns]").dt.to_period("D")
        tm.assert_series_equal(result, expected)


def test_normalize_pre_epoch_dates():
    # GH: 36294
    ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
    result = ser.dt.normalize()
    expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"]))
    tm.assert_series_equal(result, expected)


def test_day_attribute_non_nano_beyond_int32():
    # GH 52386
    data = np.array(
        [
            136457654736252,
            134736784364431,
            245345345545332,
            223432411,
            2343241,
            3634548734,
            23234,
        ],
        dtype="timedelta64[s]",
    )
    ser = Series(data)
    result = ser.dt.days
    expected = Series([1579371003, 1559453522, 2839645203, 2586, 27, 42066, 0])
    tm.assert_series_equal(result, expected)
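Likewise, a brief sketch of the .dt namespace usage covered by the tests above; the values are illustrative only:

import pandas as pd

ser = pd.Series(pd.date_range("2013-01-01", periods=3, freq="D"), name="xxx")

# Field access, name lookups, and formatting through the .dt namespace.
print(ser.dt.year.tolist())                  # [2013, 2013, 2013]
print(ser.dt.day_name().tolist())            # ['Tuesday', 'Wednesday', 'Thursday']
print(ser.dt.strftime("%Y/%m/%d").tolist())  # ['2013/01/01', '2013/01/02', '2013/01/03']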
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py
ADDED
@@ -0,0 +1,129 @@
import re

import pytest

from pandas import (
    ArrowDtype,
    Series,
)
import pandas._testing as tm

pa = pytest.importorskip("pyarrow")

from pandas.compat import pa_version_under11p0


@pytest.mark.parametrize(
    "list_dtype",
    (
        pa.list_(pa.int64()),
        pa.list_(pa.int64(), list_size=3),
        pa.large_list(pa.int64()),
    ),
)
def test_list_getitem(list_dtype):
    ser = Series(
        [[1, 2, 3], [4, None, 5], None],
        dtype=ArrowDtype(list_dtype),
    )
    actual = ser.list[1]
    expected = Series([2, None, None], dtype="int64[pyarrow]")
    tm.assert_series_equal(actual, expected)


def test_list_getitem_slice():
    ser = Series(
        [[1, 2, 3], [4, None, 5], None],
        dtype=ArrowDtype(pa.list_(pa.int64())),
    )
    if pa_version_under11p0:
        with pytest.raises(
            NotImplementedError, match="List slice not supported by pyarrow "
        ):
            ser.list[1:None:None]
    else:
        actual = ser.list[1:None:None]
        expected = Series(
            [[2, 3], [None, 5], None], dtype=ArrowDtype(pa.list_(pa.int64()))
        )
        tm.assert_series_equal(actual, expected)


def test_list_len():
    ser = Series(
        [[1, 2, 3], [4, None], None],
        dtype=ArrowDtype(pa.list_(pa.int64())),
    )
    actual = ser.list.len()
    expected = Series([3, 2, None], dtype=ArrowDtype(pa.int32()))
    tm.assert_series_equal(actual, expected)


def test_list_flatten():
    ser = Series(
        [[1, 2, 3], [4, None], None],
        dtype=ArrowDtype(pa.list_(pa.int64())),
    )
    actual = ser.list.flatten()
    expected = Series([1, 2, 3, 4, None], dtype=ArrowDtype(pa.int64()))
    tm.assert_series_equal(actual, expected)


def test_list_getitem_slice_invalid():
    ser = Series(
        [[1, 2, 3], [4, None, 5], None],
        dtype=ArrowDtype(pa.list_(pa.int64())),
    )
    if pa_version_under11p0:
        with pytest.raises(
            NotImplementedError, match="List slice not supported by pyarrow "
        ):
            ser.list[1:None:0]
    else:
        with pytest.raises(pa.lib.ArrowInvalid, match=re.escape("`step` must be >= 1")):
            ser.list[1:None:0]


def test_list_accessor_non_list_dtype():
    ser = Series(
        [1, 2, 4],
        dtype=ArrowDtype(pa.int64()),
    )
    with pytest.raises(
        AttributeError,
        match=re.escape(
            "Can only use the '.list' accessor with 'list[pyarrow]' dtype, "
            "not int64[pyarrow]."
        ),
    ):
        ser.list[1:None:0]


@pytest.mark.parametrize(
    "list_dtype",
    (
        pa.list_(pa.int64()),
        pa.list_(pa.int64(), list_size=3),
        pa.large_list(pa.int64()),
    ),
)
def test_list_getitem_invalid_index(list_dtype):
    ser = Series(
        [[1, 2, 3], [4, None, 5], None],
        dtype=ArrowDtype(list_dtype),
    )
    with pytest.raises(pa.lib.ArrowInvalid, match="Index -1 is out of bounds"):
        ser.list[-1]
    with pytest.raises(pa.lib.ArrowInvalid, match="Index 5 is out of bounds"):
        ser.list[5]
    with pytest.raises(ValueError, match="key must be an int or slice, got str"):
|
120 |
+
ser.list["abc"]
|
121 |
+
|
122 |
+
|
123 |
+
def test_list_accessor_not_iterable():
|
124 |
+
ser = Series(
|
125 |
+
[[1, 2, 3], [4, None], None],
|
126 |
+
dtype=ArrowDtype(pa.list_(pa.int64())),
|
127 |
+
)
|
128 |
+
with pytest.raises(TypeError, match="'ListAccessor' object is not iterable"):
|
129 |
+
iter(ser.list)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py
ADDED
@@ -0,0 +1,9 @@
+from pandas import Series
+
+
+class TestSparseAccessor:
+    def test_sparse_accessor_updates_on_inplace(self):
+        ser = Series([1, 1, 2, 3], dtype="Sparse[int]")
+        return_value = ser.drop([0, 1], inplace=True)
+        assert return_value is None
+        assert ser.sparse.density == 1.0
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py
ADDED
@@ -0,0 +1,25 @@
+import pytest
+
+from pandas import Series
+import pandas._testing as tm
+
+
+class TestStrAccessor:
+    def test_str_attribute(self):
+        # GH#9068
+        methods = ["strip", "rstrip", "lstrip"]
+        ser = Series([" jack", "jill ", " jesse ", "frank"])
+        for method in methods:
+            expected = Series([getattr(str, method)(x) for x in ser.values])
+            tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected)
+
+        # str accessor only valid with string values
+        ser = Series(range(5))
+        with pytest.raises(AttributeError, match="only use .str accessor"):
+            ser.str.repeat(2)
+
+    def test_str_accessor_updates_on_inplace(self):
+        ser = Series(list("abc"))
+        return_value = ser.drop([0], inplace=True)
+        assert return_value is None
+        assert len(ser.str.lower()) == 2
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/accessors/test_struct_accessor.py
ADDED
@@ -0,0 +1,196 @@
+import re
+
+import pytest
+
+from pandas.compat.pyarrow import (
+    pa_version_under11p0,
+    pa_version_under13p0,
+)
+
+from pandas import (
+    ArrowDtype,
+    DataFrame,
+    Index,
+    Series,
+)
+import pandas._testing as tm
+
+pa = pytest.importorskip("pyarrow")
+pc = pytest.importorskip("pyarrow.compute")
+
+
+def test_struct_accessor_dtypes():
+    ser = Series(
+        [],
+        dtype=ArrowDtype(
+            pa.struct(
+                [
+                    ("int_col", pa.int64()),
+                    ("string_col", pa.string()),
+                    (
+                        "struct_col",
+                        pa.struct(
+                            [
+                                ("int_col", pa.int64()),
+                                ("float_col", pa.float64()),
+                            ]
+                        ),
+                    ),
+                ]
+            )
+        ),
+    )
+    actual = ser.struct.dtypes
+    expected = Series(
+        [
+            ArrowDtype(pa.int64()),
+            ArrowDtype(pa.string()),
+            ArrowDtype(
+                pa.struct(
+                    [
+                        ("int_col", pa.int64()),
+                        ("float_col", pa.float64()),
+                    ]
+                )
+            ),
+        ],
+        index=Index(["int_col", "string_col", "struct_col"]),
+    )
+    tm.assert_series_equal(actual, expected)
+
+
+@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
+def test_struct_accessor_field():
+    index = Index([-100, 42, 123])
+    ser = Series(
+        [
+            {"rice": 1.0, "maize": -1, "wheat": "a"},
+            {"rice": 2.0, "maize": 0, "wheat": "b"},
+            {"rice": 3.0, "maize": 1, "wheat": "c"},
+        ],
+        dtype=ArrowDtype(
+            pa.struct(
+                [
+                    ("rice", pa.float64()),
+                    ("maize", pa.int64()),
+                    ("wheat", pa.string()),
+                ]
+            )
+        ),
+        index=index,
+    )
+    by_name = ser.struct.field("maize")
+    by_name_expected = Series(
+        [-1, 0, 1],
+        dtype=ArrowDtype(pa.int64()),
+        index=index,
+        name="maize",
+    )
+    tm.assert_series_equal(by_name, by_name_expected)
+
+    by_index = ser.struct.field(2)
+    by_index_expected = Series(
+        ["a", "b", "c"],
+        dtype=ArrowDtype(pa.string()),
+        index=index,
+        name="wheat",
+    )
+    tm.assert_series_equal(by_index, by_index_expected)
+
+
+def test_struct_accessor_field_with_invalid_name_or_index():
+    ser = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())])))
+
+    with pytest.raises(ValueError, match="name_or_index must be an int, str,"):
+        ser.struct.field(1.1)
+
+
+@pytest.mark.skipif(pa_version_under11p0, reason="pyarrow>=11.0.0 required")
+def test_struct_accessor_explode():
+    index = Index([-100, 42, 123])
+    ser = Series(
+        [
+            {"painted": 1, "snapping": {"sea": "green"}},
+            {"painted": 2, "snapping": {"sea": "leatherback"}},
+            {"painted": 3, "snapping": {"sea": "hawksbill"}},
+        ],
+        dtype=ArrowDtype(
+            pa.struct(
+                [
+                    ("painted", pa.int64()),
+                    ("snapping", pa.struct([("sea", pa.string())])),
+                ]
+            )
+        ),
+        index=index,
+    )
+    actual = ser.struct.explode()
+    expected = DataFrame(
+        {
+            "painted": Series([1, 2, 3], index=index, dtype=ArrowDtype(pa.int64())),
+            "snapping": Series(
+                [{"sea": "green"}, {"sea": "leatherback"}, {"sea": "hawksbill"}],
+                index=index,
+                dtype=ArrowDtype(pa.struct([("sea", pa.string())])),
+            ),
+        },
+    )
+    tm.assert_frame_equal(actual, expected)
+
+
+@pytest.mark.parametrize(
+    "invalid",
+    [
+        pytest.param(Series([1, 2, 3], dtype="int64"), id="int64"),
+        pytest.param(
+            Series(["a", "b", "c"], dtype="string[pyarrow]"), id="string-pyarrow"
+        ),
+    ],
+)
+def test_struct_accessor_api_for_invalid(invalid):
+    with pytest.raises(
+        AttributeError,
+        match=re.escape(
+            "Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, "
+            f"not {invalid.dtype}."
+        ),
+    ):
+        invalid.struct
+
+
+@pytest.mark.parametrize(
+    ["indices", "name"],
+    [
+        (0, "int_col"),
+        ([1, 2], "str_col"),
+        (pc.field("int_col"), "int_col"),
+        ("int_col", "int_col"),
+        (b"string_col", b"string_col"),
+        ([b"string_col"], "string_col"),
+    ],
+)
+@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")
+def test_struct_accessor_field_expanded(indices, name):
+    arrow_type = pa.struct(
+        [
+            ("int_col", pa.int64()),
+            (
+                "struct_col",
+                pa.struct(
+                    [
+                        ("int_col", pa.int64()),
+                        ("float_col", pa.float64()),
+                        ("str_col", pa.string()),
+                    ]
+                ),
+            ),
+            (b"string_col", pa.string()),
+        ]
+    )
+
+    data = pa.array([], type=arrow_type)
+    ser = Series(data, dtype=ArrowDtype(arrow_type))
+    expected = pc.struct_field(data, indices)
+    result = ser.struct.field(indices)
+    tm.assert_equal(result.array._pa_array.combine_chunks(), expected)
+    assert result.name == name
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (193 Bytes).
env-llmeval/lib/python3.10/site-packages/pandas/tests/series/indexing/__pycache__/test_datetime.cpython-310.pyc
ADDED
Binary file (11.8 kB).