Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
- .gitattributes +1 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so +3 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__init__.py +87 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.pyi +12 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.pyi +14 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.pyi +83 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.pyi +62 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.pyi +33 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi +14 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi +174 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi +241 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.pyi +21 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.pyi +43 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__init__.py +9 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py +303 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py +1448 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py +410 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py +1387 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/readers.py +2383 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_api.py +383 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_types.py +62 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py +89 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py +349 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py +216 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py +111 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py +550 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_array.py +478 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py +1340 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimes.py +840 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_timedeltas.py +313 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -169,3 +169,4 @@ env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64
 env-llmeval/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97df0241d7470fafc2785aaef2c2c4216803f45190f2c617a9042fecfc04678b
+size 1409928
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__init__.py ADDED
@@ -0,0 +1,87 @@
+__all__ = [
+    "dtypes",
+    "localize_pydatetime",
+    "NaT",
+    "NaTType",
+    "iNaT",
+    "nat_strings",
+    "OutOfBoundsDatetime",
+    "OutOfBoundsTimedelta",
+    "IncompatibleFrequency",
+    "Period",
+    "Resolution",
+    "Timedelta",
+    "normalize_i8_timestamps",
+    "is_date_array_normalized",
+    "dt64arr_to_periodarr",
+    "delta_to_nanoseconds",
+    "ints_to_pydatetime",
+    "ints_to_pytimedelta",
+    "get_resolution",
+    "Timestamp",
+    "tz_convert_from_utc_single",
+    "tz_convert_from_utc",
+    "to_offset",
+    "Tick",
+    "BaseOffset",
+    "tz_compare",
+    "is_unitless",
+    "astype_overflowsafe",
+    "get_unit_from_dtype",
+    "periods_per_day",
+    "periods_per_second",
+    "guess_datetime_format",
+    "add_overflowsafe",
+    "get_supported_dtype",
+    "is_supported_dtype",
+]
+
+from pandas._libs.tslibs import dtypes  # pylint: disable=import-self
+from pandas._libs.tslibs.conversion import localize_pydatetime
+from pandas._libs.tslibs.dtypes import (
+    Resolution,
+    periods_per_day,
+    periods_per_second,
+)
+from pandas._libs.tslibs.nattype import (
+    NaT,
+    NaTType,
+    iNaT,
+    nat_strings,
+)
+from pandas._libs.tslibs.np_datetime import (
+    OutOfBoundsDatetime,
+    OutOfBoundsTimedelta,
+    add_overflowsafe,
+    astype_overflowsafe,
+    get_supported_dtype,
+    is_supported_dtype,
+    is_unitless,
+    py_get_unit_from_dtype as get_unit_from_dtype,
+)
+from pandas._libs.tslibs.offsets import (
+    BaseOffset,
+    Tick,
+    to_offset,
+)
+from pandas._libs.tslibs.parsing import guess_datetime_format
+from pandas._libs.tslibs.period import (
+    IncompatibleFrequency,
+    Period,
+)
+from pandas._libs.tslibs.timedeltas import (
+    Timedelta,
+    delta_to_nanoseconds,
+    ints_to_pytimedelta,
+)
+from pandas._libs.tslibs.timestamps import Timestamp
+from pandas._libs.tslibs.timezones import tz_compare
+from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single
+from pandas._libs.tslibs.vectorized import (
+    dt64arr_to_periodarr,
+    get_resolution,
+    ints_to_pydatetime,
+    is_date_array_normalized,
+    normalize_i8_timestamps,
+    tz_convert_from_utc,
+)
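For orientation (not part of the diff): the module above only re-exports the timestamp/timedelta machinery, and the main names are the same objects reachable from the public pandas namespace. A minimal sketch, assuming a standard pandas 2.x install:

import pandas as pd

# pd.Timestamp / pd.Timedelta / pd.NaT are the objects re-exported by pandas._libs.tslibs above
ts = pd.Timestamp("2024-03-10 12:00", tz="UTC")
td = pd.Timedelta(hours=3, minutes=30)
print(ts + td)                        # Timestamp('2024-03-10 15:30:00+0000', tz='UTC')
print(pd.Timestamp("NaT") is pd.NaT)  # missing timestamps collapse to the NaT singleton -> True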
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (62.3 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.pyi ADDED
@@ -0,0 +1,12 @@
+DAYS: list[str]
+MONTH_ALIASES: dict[int, str]
+MONTH_NUMBERS: dict[str, int]
+MONTHS: list[str]
+int_to_weekday: dict[int, str]
+
+def get_firstbday(year: int, month: int) -> int: ...
+def get_lastbday(year: int, month: int) -> int: ...
+def get_day_of_year(year: int, month: int, day: int) -> int: ...
+def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ...
+def get_week_of_year(year: int, month: int, day: int) -> int: ...
+def get_days_in_month(year: int, month: int) -> int: ...
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.pyi ADDED
@@ -0,0 +1,14 @@
+from datetime import (
+    datetime,
+    tzinfo,
+)
+
+import numpy as np
+
+DT64NS_DTYPE: np.dtype
+TD64NS_DTYPE: np.dtype
+
+def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
+def cast_from_unit_vectorized(
+    values: np.ndarray, unit: str, out_unit: str = ...
+) -> np.ndarray: ...
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.pyi ADDED
@@ -0,0 +1,83 @@
+from enum import Enum
+
+OFFSET_TO_PERIOD_FREQSTR: dict[str, str]
+
+def periods_per_day(reso: int = ...) -> int: ...
+def periods_per_second(reso: int) -> int: ...
+def abbrev_to_npy_unit(abbrev: str | None) -> int: ...
+def freq_to_period_freqstr(freq_n: int, freq_name: str) -> str: ...
+
+class PeriodDtypeBase:
+    _dtype_code: int  # PeriodDtypeCode
+    _n: int
+
+    # actually __cinit__
+    def __new__(cls, code: int, n: int): ...
+    @property
+    def _freq_group_code(self) -> int: ...
+    @property
+    def _resolution_obj(self) -> Resolution: ...
+    def _get_to_timestamp_base(self) -> int: ...
+    @property
+    def _freqstr(self) -> str: ...
+    def __hash__(self) -> int: ...
+    def _is_tick_like(self) -> bool: ...
+    @property
+    def _creso(self) -> int: ...
+    @property
+    def _td64_unit(self) -> str: ...
+
+class FreqGroup(Enum):
+    FR_ANN: int
+    FR_QTR: int
+    FR_MTH: int
+    FR_WK: int
+    FR_BUS: int
+    FR_DAY: int
+    FR_HR: int
+    FR_MIN: int
+    FR_SEC: int
+    FR_MS: int
+    FR_US: int
+    FR_NS: int
+    FR_UND: int
+    @staticmethod
+    def from_period_dtype_code(code: int) -> FreqGroup: ...
+
+class Resolution(Enum):
+    RESO_NS: int
+    RESO_US: int
+    RESO_MS: int
+    RESO_SEC: int
+    RESO_MIN: int
+    RESO_HR: int
+    RESO_DAY: int
+    RESO_MTH: int
+    RESO_QTR: int
+    RESO_YR: int
+    def __lt__(self, other: Resolution) -> bool: ...
+    def __ge__(self, other: Resolution) -> bool: ...
+    @property
+    def attrname(self) -> str: ...
+    @classmethod
+    def from_attrname(cls, attrname: str) -> Resolution: ...
+    @classmethod
+    def get_reso_from_freqstr(cls, freq: str) -> Resolution: ...
+    @property
+    def attr_abbrev(self) -> str: ...
+
+class NpyDatetimeUnit(Enum):
+    NPY_FR_Y: int
+    NPY_FR_M: int
+    NPY_FR_W: int
+    NPY_FR_D: int
+    NPY_FR_h: int
+    NPY_FR_m: int
+    NPY_FR_s: int
+    NPY_FR_ms: int
+    NPY_FR_us: int
+    NPY_FR_ns: int
+    NPY_FR_ps: int
+    NPY_FR_fs: int
+    NPY_FR_as: int
+    NPY_FR_GENERIC: int
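For orientation (not part of the diff): Resolution and FreqGroup are internal enums pandas uses to track datetime precision. A hedged sketch based only on the stub above; this is private pandas API and may change between versions:

from pandas._libs.tslibs.dtypes import Resolution  # internal API, illustrative only

reso = Resolution.from_attrname("minute")          # enum member for minute resolution
print(reso.attrname)                               # "minute"
print(Resolution.RESO_SEC < Resolution.RESO_DAY)   # members are ordered from fine to coarse -> True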
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.pyi ADDED
@@ -0,0 +1,62 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def build_field_sarray(
+    dtindex: npt.NDArray[np.int64],  # const int64_t[:]
+    reso: int,  # NPY_DATETIMEUNIT
+) -> np.ndarray: ...
+def month_position_check(fields, weekdays) -> str | None: ...
+def get_date_name_field(
+    dtindex: npt.NDArray[np.int64],  # const int64_t[:]
+    field: str,
+    locale: str | None = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def get_start_end_field(
+    dtindex: npt.NDArray[np.int64],
+    field: str,
+    freqstr: str | None = ...,
+    month_kw: int = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.bool_]: ...
+def get_date_field(
+    dtindex: npt.NDArray[np.int64],  # const int64_t[:]
+    field: str,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int32]: ...
+def get_timedelta_field(
+    tdindex: npt.NDArray[np.int64],  # const int64_t[:]
+    field: str,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int32]: ...
+def get_timedelta_days(
+    tdindex: npt.NDArray[np.int64],  # const int64_t[:]
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def isleapyear_arr(
+    years: np.ndarray,
+) -> npt.NDArray[np.bool_]: ...
+def build_isocalendar_sarray(
+    dtindex: npt.NDArray[np.int64],  # const int64_t[:]
+    reso: int,  # NPY_DATETIMEUNIT
+) -> np.ndarray: ...
+def _get_locale_names(name_type: str, locale: str | None = ...): ...
+
+class RoundTo:
+    @property
+    def MINUS_INFTY(self) -> int: ...
+    @property
+    def PLUS_INFTY(self) -> int: ...
+    @property
+    def NEAREST_HALF_EVEN(self) -> int: ...
+    @property
+    def NEAREST_HALF_PLUS_INFTY(self) -> int: ...
+    @property
+    def NEAREST_HALF_MINUS_INFTY(self) -> int: ...
+
+def round_nsint64(
+    values: npt.NDArray[np.int64],
+    mode: RoundTo,
+    nanos: int,
+) -> npt.NDArray[np.int64]: ...
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (237 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (152 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (457 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.pyi ADDED
@@ -0,0 +1,33 @@
+from datetime import datetime
+
+import numpy as np
+
+from pandas._typing import npt
+
+class DateParseError(ValueError): ...
+
+def py_parse_datetime_string(
+    date_string: str,
+    dayfirst: bool = ...,
+    yearfirst: bool = ...,
+) -> datetime: ...
+def parse_datetime_string_with_reso(
+    date_string: str,
+    freq: str | None = ...,
+    dayfirst: bool | None = ...,
+    yearfirst: bool | None = ...,
+) -> tuple[datetime, str]: ...
+def _does_string_look_like_datetime(py_string: str) -> bool: ...
+def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ...
+def try_parse_dates(
+    values: npt.NDArray[np.object_],  # object[:]
+    parser,
+) -> npt.NDArray[np.object_]: ...
+def guess_datetime_format(
+    dt_str: str,
+    dayfirst: bool | None = ...,
+) -> str | None: ...
+def concat_date_cols(
+    date_cols: tuple,
+) -> npt.NDArray[np.object_]: ...
+def get_rule_month(source: str) -> str: ...
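For orientation (not part of the diff): guess_datetime_format is the one function from this stub that the tslibs __init__ above re-exports. An illustrative sketch:

from pandas._libs.tslibs import guess_datetime_format

print(guess_datetime_format("2021-01-31 14:45:00"))        # '%Y-%m-%d %H:%M:%S'
print(guess_datetime_format("31/01/2021", dayfirst=True))  # '%d/%m/%Y'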
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi ADDED
@@ -0,0 +1,14 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def array_strptime(
+    values: npt.NDArray[np.object_],
+    fmt: str | None,
+    exact: bool = ...,
+    errors: str = ...,
+    utc: bool = ...,
+    creso: int = ...,  # NPY_DATETIMEUNIT
+) -> tuple[np.ndarray, np.ndarray]: ...
+
+# first ndarray is M8[ns], second is object ndarray of tzinfo | None
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (652 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi ADDED
@@ -0,0 +1,174 @@
+from datetime import timedelta
+from typing import (
+    ClassVar,
+    Literal,
+    TypeAlias,
+    TypeVar,
+    overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs import (
+    NaTType,
+    Tick,
+)
+from pandas._typing import (
+    Frequency,
+    Self,
+    npt,
+)
+
+# This should be kept consistent with the keys in the dict timedelta_abbrevs
+# in pandas/_libs/tslibs/timedeltas.pyx
+UnitChoices: TypeAlias = Literal[
+    "Y",
+    "y",
+    "M",
+    "W",
+    "w",
+    "D",
+    "d",
+    "days",
+    "day",
+    "hours",
+    "hour",
+    "hr",
+    "h",
+    "m",
+    "minute",
+    "min",
+    "minutes",
+    "T",
+    "t",
+    "s",
+    "seconds",
+    "sec",
+    "second",
+    "ms",
+    "milliseconds",
+    "millisecond",
+    "milli",
+    "millis",
+    "L",
+    "l",
+    "us",
+    "microseconds",
+    "microsecond",
+    "µs",
+    "micro",
+    "micros",
+    "u",
+    "ns",
+    "nanoseconds",
+    "nano",
+    "nanos",
+    "nanosecond",
+    "n",
+]
+_S = TypeVar("_S", bound=timedelta)
+
+def get_unit_for_round(freq, creso: int) -> int: ...
+def disallow_ambiguous_unit(unit: str | None) -> None: ...
+def ints_to_pytimedelta(
+    m8values: npt.NDArray[np.timedelta64],
+    box: bool = ...,
+) -> npt.NDArray[np.object_]: ...
+def array_to_timedelta64(
+    values: npt.NDArray[np.object_],
+    unit: str | None = ...,
+    errors: str = ...,
+) -> np.ndarray: ...  # np.ndarray[m8ns]
+def parse_timedelta_unit(unit: str | None) -> UnitChoices: ...
+def delta_to_nanoseconds(
+    delta: np.timedelta64 | timedelta | Tick,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+    round_ok: bool = ...,
+) -> int: ...
+def floordiv_object_array(
+    left: np.ndarray, right: npt.NDArray[np.object_]
+) -> np.ndarray: ...
+def truediv_object_array(
+    left: np.ndarray, right: npt.NDArray[np.object_]
+) -> np.ndarray: ...
+
+class Timedelta(timedelta):
+    _creso: int
+    min: ClassVar[Timedelta]
+    max: ClassVar[Timedelta]
+    resolution: ClassVar[Timedelta]
+    value: int  # np.int64
+    _value: int  # np.int64
+    # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
+    def __new__(  # type: ignore[misc]
+        cls: type[_S],
+        value=...,
+        unit: str | None = ...,
+        **kwargs: float | np.integer | np.floating,
+    ) -> _S | NaTType: ...
+    @classmethod
+    def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
+    @property
+    def days(self) -> int: ...
+    @property
+    def seconds(self) -> int: ...
+    @property
+    def microseconds(self) -> int: ...
+    def total_seconds(self) -> float: ...
+    def to_pytimedelta(self) -> timedelta: ...
+    def to_timedelta64(self) -> np.timedelta64: ...
+    @property
+    def asm8(self) -> np.timedelta64: ...
+    # TODO: round/floor/ceil could return NaT?
+    def round(self, freq: Frequency) -> Self: ...
+    def floor(self, freq: Frequency) -> Self: ...
+    def ceil(self, freq: Frequency) -> Self: ...
+    @property
+    def resolution_string(self) -> str: ...
+    def __add__(self, other: timedelta) -> Timedelta: ...
+    def __radd__(self, other: timedelta) -> Timedelta: ...
+    def __sub__(self, other: timedelta) -> Timedelta: ...
+    def __rsub__(self, other: timedelta) -> Timedelta: ...
+    def __neg__(self) -> Timedelta: ...
+    def __pos__(self) -> Timedelta: ...
+    def __abs__(self) -> Timedelta: ...
+    def __mul__(self, other: float) -> Timedelta: ...
+    def __rmul__(self, other: float) -> Timedelta: ...
+    # error: Signature of "__floordiv__" incompatible with supertype "timedelta"
+    @overload  # type: ignore[override]
+    def __floordiv__(self, other: timedelta) -> int: ...
+    @overload
+    def __floordiv__(self, other: float) -> Timedelta: ...
+    @overload
+    def __floordiv__(
+        self, other: npt.NDArray[np.timedelta64]
+    ) -> npt.NDArray[np.intp]: ...
+    @overload
+    def __floordiv__(
+        self, other: npt.NDArray[np.number]
+    ) -> npt.NDArray[np.timedelta64] | Timedelta: ...
+    @overload
+    def __rfloordiv__(self, other: timedelta | str) -> int: ...
+    @overload
+    def __rfloordiv__(self, other: None | NaTType) -> NaTType: ...
+    @overload
+    def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ...
+    @overload
+    def __truediv__(self, other: timedelta) -> float: ...
+    @overload
+    def __truediv__(self, other: float) -> Timedelta: ...
+    def __mod__(self, other: timedelta) -> Timedelta: ...
+    def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ...
+    def __le__(self, other: timedelta) -> bool: ...
+    def __lt__(self, other: timedelta) -> bool: ...
+    def __ge__(self, other: timedelta) -> bool: ...
+    def __gt__(self, other: timedelta) -> bool: ...
+    def __hash__(self) -> int: ...
+    def isoformat(self) -> str: ...
+    def to_numpy(
+        self, dtype: npt.DTypeLike = ..., copy: bool = False
+    ) -> np.timedelta64: ...
+    def view(self, dtype: npt.DTypeLike) -> object: ...
+    @property
+    def unit(self) -> str: ...
+    def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ...
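For orientation (not part of the diff): these stubs describe the public pd.Timedelta scalar. A short usage sketch, assuming pandas 2.x (as_unit and non-nanosecond resolutions are 2.x features):

import pandas as pd

td = pd.Timedelta("1 days 02:30:00")
print(td.total_seconds())            # 95400.0
print(td.floor("h"))                 # 1 days 02:00:00
print(td // pd.Timedelta("15min"))   # 106
print(td.as_unit("s"))               # same value, stored at second resolution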
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (665 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi ADDED
@@ -0,0 +1,241 @@
+from datetime import (
+    date as _date,
+    datetime,
+    time as _time,
+    timedelta,
+    tzinfo as _tzinfo,
+)
+from time import struct_time
+from typing import (
+    ClassVar,
+    Literal,
+    TypeAlias,
+    overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs import (
+    BaseOffset,
+    NaTType,
+    Period,
+    Tick,
+    Timedelta,
+)
+from pandas._typing import (
+    Self,
+    TimestampNonexistent,
+)
+
+_TimeZones: TypeAlias = str | _tzinfo | None | int
+
+def integer_op_not_supported(obj: object) -> TypeError: ...
+
+class Timestamp(datetime):
+    _creso: int
+    min: ClassVar[Timestamp]
+    max: ClassVar[Timestamp]
+
+    resolution: ClassVar[Timedelta]
+    _value: int  # np.int64
+    # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
+    def __new__(  # type: ignore[misc]
+        cls: type[Self],
+        ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
+        year: int | None = ...,
+        month: int | None = ...,
+        day: int | None = ...,
+        hour: int | None = ...,
+        minute: int | None = ...,
+        second: int | None = ...,
+        microsecond: int | None = ...,
+        tzinfo: _tzinfo | None = ...,
+        *,
+        nanosecond: int | None = ...,
+        tz: _TimeZones = ...,
+        unit: str | int | None = ...,
+        fold: int | None = ...,
+    ) -> Self | NaTType: ...
+    @classmethod
+    def _from_value_and_reso(
+        cls, value: int, reso: int, tz: _TimeZones
+    ) -> Timestamp: ...
+    @property
+    def value(self) -> int: ...  # np.int64
+    @property
+    def year(self) -> int: ...
+    @property
+    def month(self) -> int: ...
+    @property
+    def day(self) -> int: ...
+    @property
+    def hour(self) -> int: ...
+    @property
+    def minute(self) -> int: ...
+    @property
+    def second(self) -> int: ...
+    @property
+    def microsecond(self) -> int: ...
+    @property
+    def nanosecond(self) -> int: ...
+    @property
+    def tzinfo(self) -> _tzinfo | None: ...
+    @property
+    def tz(self) -> _tzinfo | None: ...
+    @property
+    def fold(self) -> int: ...
+    @classmethod
+    def fromtimestamp(cls, ts: float, tz: _TimeZones = ...) -> Self: ...
+    @classmethod
+    def utcfromtimestamp(cls, ts: float) -> Self: ...
+    @classmethod
+    def today(cls, tz: _TimeZones = ...) -> Self: ...
+    @classmethod
+    def fromordinal(
+        cls,
+        ordinal: int,
+        tz: _TimeZones = ...,
+    ) -> Self: ...
+    @classmethod
+    def now(cls, tz: _TimeZones = ...) -> Self: ...
+    @classmethod
+    def utcnow(cls) -> Self: ...
+    # error: Signature of "combine" incompatible with supertype "datetime"
+    @classmethod
+    def combine(  # type: ignore[override]
+        cls, date: _date, time: _time
+    ) -> datetime: ...
+    @classmethod
+    def fromisoformat(cls, date_string: str) -> Self: ...
+    def strftime(self, format: str) -> str: ...
+    def __format__(self, fmt: str) -> str: ...
+    def toordinal(self) -> int: ...
+    def timetuple(self) -> struct_time: ...
+    def timestamp(self) -> float: ...
+    def utctimetuple(self) -> struct_time: ...
+    def date(self) -> _date: ...
+    def time(self) -> _time: ...
+    def timetz(self) -> _time: ...
+    # LSP violation: nanosecond is not present in datetime.datetime.replace
+    # and has positional args following it
+    def replace(  # type: ignore[override]
+        self,
+        year: int | None = ...,
+        month: int | None = ...,
+        day: int | None = ...,
+        hour: int | None = ...,
+        minute: int | None = ...,
+        second: int | None = ...,
+        microsecond: int | None = ...,
+        nanosecond: int | None = ...,
+        tzinfo: _tzinfo | type[object] | None = ...,
+        fold: int | None = ...,
+    ) -> Self: ...
+    # LSP violation: datetime.datetime.astimezone has a default value for tz
+    def astimezone(self, tz: _TimeZones) -> Self: ...  # type: ignore[override]
+    def ctime(self) -> str: ...
+    def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
+    @classmethod
+    def strptime(
+        # Note: strptime is actually disabled and raises NotImplementedError
+        cls,
+        date_string: str,
+        format: str,
+    ) -> Self: ...
+    def utcoffset(self) -> timedelta | None: ...
+    def tzname(self) -> str | None: ...
+    def dst(self) -> timedelta | None: ...
+    def __le__(self, other: datetime) -> bool: ...  # type: ignore[override]
+    def __lt__(self, other: datetime) -> bool: ...  # type: ignore[override]
+    def __ge__(self, other: datetime) -> bool: ...  # type: ignore[override]
+    def __gt__(self, other: datetime) -> bool: ...  # type: ignore[override]
+    # error: Signature of "__add__" incompatible with supertype "date"/"datetime"
+    @overload  # type: ignore[override]
+    def __add__(self, other: np.ndarray) -> np.ndarray: ...
+    @overload
+    def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
+    def __radd__(self, other: timedelta) -> Self: ...
+    @overload  # type: ignore[override]
+    def __sub__(self, other: datetime) -> Timedelta: ...
+    @overload
+    def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
+    def __hash__(self) -> int: ...
+    def weekday(self) -> int: ...
+    def isoweekday(self) -> int: ...
+    # Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return
+    # type "_IsoCalendarDate" in supertype "date"
+    def isocalendar(self) -> tuple[int, int, int]: ...  # type: ignore[override]
+    @property
+    def is_leap_year(self) -> bool: ...
+    @property
+    def is_month_start(self) -> bool: ...
+    @property
+    def is_quarter_start(self) -> bool: ...
+    @property
+    def is_year_start(self) -> bool: ...
+    @property
+    def is_month_end(self) -> bool: ...
+    @property
+    def is_quarter_end(self) -> bool: ...
+    @property
+    def is_year_end(self) -> bool: ...
+    def to_pydatetime(self, warn: bool = ...) -> datetime: ...
+    def to_datetime64(self) -> np.datetime64: ...
+    def to_period(self, freq: BaseOffset | str | None = None) -> Period: ...
+    def to_julian_date(self) -> np.float64: ...
+    @property
+    def asm8(self) -> np.datetime64: ...
+    def tz_convert(self, tz: _TimeZones) -> Self: ...
+    # TODO: could return NaT?
+    def tz_localize(
+        self,
+        tz: _TimeZones,
+        ambiguous: bool | Literal["raise", "NaT"] = ...,
+        nonexistent: TimestampNonexistent = ...,
+    ) -> Self: ...
+    def normalize(self) -> Self: ...
+    # TODO: round/floor/ceil could return NaT?
+    def round(
+        self,
+        freq: str,
+        ambiguous: bool | Literal["raise", "NaT"] = ...,
+        nonexistent: TimestampNonexistent = ...,
+    ) -> Self: ...
+    def floor(
+        self,
+        freq: str,
+        ambiguous: bool | Literal["raise", "NaT"] = ...,
+        nonexistent: TimestampNonexistent = ...,
+    ) -> Self: ...
+    def ceil(
+        self,
+        freq: str,
+        ambiguous: bool | Literal["raise", "NaT"] = ...,
+        nonexistent: TimestampNonexistent = ...,
+    ) -> Self: ...
+    def day_name(self, locale: str | None = ...) -> str: ...
+    def month_name(self, locale: str | None = ...) -> str: ...
+    @property
+    def day_of_week(self) -> int: ...
+    @property
+    def dayofweek(self) -> int: ...
+    @property
+    def day_of_year(self) -> int: ...
+    @property
+    def dayofyear(self) -> int: ...
+    @property
+    def quarter(self) -> int: ...
+    @property
+    def week(self) -> int: ...
+    def to_numpy(
+        self, dtype: np.dtype | None = ..., copy: bool = ...
+    ) -> np.datetime64: ...
+    @property
+    def _date_repr(self) -> str: ...
+    @property
+    def days_in_month(self) -> int: ...
+    @property
+    def daysinmonth(self) -> int: ...
+    @property
+    def unit(self) -> str: ...
+    def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ...
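For orientation (not part of the diff): the same pattern applies to the public pd.Timestamp scalar, which adds nanosecond precision and timezone handling on top of datetime. An illustrative sketch:

import pandas as pd

ts = pd.Timestamp("2024-03-10 01:59:59.123456789")            # nanosecond-precision naive timestamp
print(ts.nanosecond)                                          # 789
aware = ts.tz_localize("UTC").tz_convert("America/New_York")
print(aware.round("15min"))                                   # rounded to the nearest quarter hour
print(ts.to_period("M"))                                      # 2024-03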
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (341 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.pyi ADDED
@@ -0,0 +1,21 @@
+from datetime import (
+    timedelta,
+    tzinfo,
+)
+from typing import Iterable
+
+import numpy as np
+
+from pandas._typing import npt
+
+# tz_convert_from_utc_single exposed for testing
+def tz_convert_from_utc_single(
+    utc_val: np.int64, tz: tzinfo, creso: int = ...
+) -> np.int64: ...
+def tz_localize_to_utc(
+    vals: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    ambiguous: str | bool | Iterable[bool] | None = ...,
+    nonexistent: str | timedelta | np.timedelta64 | None = ...,
+    creso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB)
env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.pyi ADDED
@@ -0,0 +1,43 @@
+"""
+For cython types that cannot be represented precisely, closest-available
+python equivalents are used, and the precise types kept as adjacent comments.
+"""
+from datetime import tzinfo
+
+import numpy as np
+
+from pandas._libs.tslibs.dtypes import Resolution
+from pandas._typing import npt
+
+def dt64arr_to_periodarr(
+    stamps: npt.NDArray[np.int64],
+    freq: int,
+    tz: tzinfo | None,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def is_date_array_normalized(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int,  # NPY_DATETIMEUNIT
+) -> bool: ...
+def normalize_i8_timestamps(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def get_resolution(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> Resolution: ...
+def ints_to_pydatetime(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None = ...,
+    box: str = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def tz_convert_from_utc(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__init__.py ADDED
@@ -0,0 +1,9 @@
+from pandas.io.parsers.readers import (
+    TextFileReader,
+    TextParser,
+    read_csv,
+    read_fwf,
+    read_table,
+)
+
+__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"]
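For orientation (not part of the diff): this package only re-exports the user-facing readers. A minimal sketch:

import io
import pandas as pd

csv_data = io.StringIO("a,b,c\n1,2,3\n4,5,6\n")
df = pd.read_csv(csv_data)   # read_csv/read_table/read_fwf come from pandas.io.parsers.readers
print(df.dtypes)             # all three columns inferred as int64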
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc ADDED
Binary file (7.99 kB)
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc ADDED
Binary file (33.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc ADDED
Binary file (9.56 kB)
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc ADDED
Binary file (61 kB)
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py ADDED
@@ -0,0 +1,303 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+    ParserError,
+    ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.inference import is_integer
+
+import pandas as pd
+from pandas import DataFrame
+
+from pandas.io._util import (
+    _arrow_dtype_mapping,
+    arrow_string_types_mapper,
+)
+from pandas.io.parsers.base_parser import ParserBase
+
+if TYPE_CHECKING:
+    from pandas._typing import ReadBuffer
+
+
+class ArrowParserWrapper(ParserBase):
+    """
+    Wrapper for the pyarrow engine for read_csv()
+    """
+
+    def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:
+        super().__init__(kwds)
+        self.kwds = kwds
+        self.src = src
+
+        self._parse_kwds()
+
+    def _parse_kwds(self) -> None:
+        """
+        Validates keywords before passing to pyarrow.
+        """
+        encoding: str | None = self.kwds.get("encoding")
+        self.encoding = "utf-8" if encoding is None else encoding
+
+        na_values = self.kwds["na_values"]
+        if isinstance(na_values, dict):
+            raise ValueError(
+                "The pyarrow engine doesn't support passing a dict for na_values"
+            )
+        self.na_values = list(self.kwds["na_values"])
+
+    def _get_pyarrow_options(self) -> None:
+        """
+        Rename some arguments to pass to pyarrow
+        """
+        mapping = {
+            "usecols": "include_columns",
+            "na_values": "null_values",
+            "escapechar": "escape_char",
+            "skip_blank_lines": "ignore_empty_lines",
+            "decimal": "decimal_point",
+            "quotechar": "quote_char",
+        }
+        for pandas_name, pyarrow_name in mapping.items():
+            if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
+                self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
+
+        # Date format handling
+        # If we get a string, we need to convert it into a list for pyarrow
+        # If we get a dict, we want to parse those separately
+        date_format = self.date_format
+        if isinstance(date_format, str):
+            date_format = [date_format]
+        else:
+            # In case of dict, we don't want to propagate through, so
+            # just set to pyarrow default of None
+
+            # Ideally, in future we disable pyarrow dtype inference (read in as string)
+            # to prevent misreads.
+            date_format = None
+        self.kwds["timestamp_parsers"] = date_format
+
+        self.parse_options = {
+            option_name: option_value
+            for option_name, option_value in self.kwds.items()
+            if option_value is not None
+            and option_name
+            in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
+        }
+
+        on_bad_lines = self.kwds.get("on_bad_lines")
+        if on_bad_lines is not None:
+            if callable(on_bad_lines):
+                self.parse_options["invalid_row_handler"] = on_bad_lines
+            elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:
+                self.parse_options[
+                    "invalid_row_handler"
+                ] = None  # PyArrow raises an exception by default
+            elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:
+
+                def handle_warning(invalid_row) -> str:
+                    warnings.warn(
+                        f"Expected {invalid_row.expected_columns} columns, but found "
+                        f"{invalid_row.actual_columns}: {invalid_row.text}",
+                        ParserWarning,
+                        stacklevel=find_stack_level(),
+                    )
+                    return "skip"
+
+                self.parse_options["invalid_row_handler"] = handle_warning
+            elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP:
+                self.parse_options["invalid_row_handler"] = lambda _: "skip"
+
+        self.convert_options = {
+            option_name: option_value
+            for option_name, option_value in self.kwds.items()
+            if option_value is not None
+            and option_name
+            in (
+                "include_columns",
+                "null_values",
+                "true_values",
+                "false_values",
+                "decimal_point",
+                "timestamp_parsers",
+            )
+        }
+        self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"]
+        # autogenerated column names are prefixed with 'f' in pyarrow.csv
+        if self.header is None and "include_columns" in self.convert_options:
+            self.convert_options["include_columns"] = [
+                f"f{n}" for n in self.convert_options["include_columns"]
+            ]
+
+        self.read_options = {
+            "autogenerate_column_names": self.header is None,
+            "skip_rows": self.header
+            if self.header is not None
+            else self.kwds["skiprows"],
+            "encoding": self.encoding,
+        }
+
+    def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
+        """
+        Processes data read in based on kwargs.
+
+        Parameters
+        ----------
+        frame: DataFrame
+            The DataFrame to process.
+
+        Returns
+        -------
+        DataFrame
+            The processed DataFrame.
+        """
+        num_cols = len(frame.columns)
+        multi_index_named = True
+        if self.header is None:
+            if self.names is None:
+                if self.header is None:
+                    self.names = range(num_cols)
+            if len(self.names) != num_cols:
+                # usecols is passed through to pyarrow, we only handle index col here
+                # The only way self.names is not the same length as number of cols is
+                # if we have int index_col. We should just pad the names(they will get
+                # removed anyways) to expected length then.
+                self.names = list(range(num_cols - len(self.names))) + self.names
+                multi_index_named = False
+            frame.columns = self.names
+        # we only need the frame not the names
+        _, frame = self._do_date_conversions(frame.columns, frame)
+        if self.index_col is not None:
+            index_to_set = self.index_col.copy()
+            for i, item in enumerate(self.index_col):
+                if is_integer(item):
+                    index_to_set[i] = frame.columns[item]
+                # String case
+                elif item not in frame.columns:
+                    raise ValueError(f"Index {item} invalid")
+
+                # Process dtype for index_col and drop from dtypes
+                if self.dtype is not None:
+                    key, new_dtype = (
+                        (item, self.dtype.get(item))
+                        if self.dtype.get(item) is not None
+                        else (frame.columns[item], self.dtype.get(frame.columns[item]))
+                    )
+                    if new_dtype is not None:
+                        frame[key] = frame[key].astype(new_dtype)
+                        del self.dtype[key]
+
+            frame.set_index(index_to_set, drop=True, inplace=True)
+            # Clear names if headerless and no name given
+            if self.header is None and not multi_index_named:
+                frame.index.names = [None] * len(frame.index.names)
+
+        if self.dtype is not None:
+            # Ignore non-existent columns from dtype mapping
+            # like other parsers do
+            if isinstance(self.dtype, dict):
+                self.dtype = {
+                    k: pandas_dtype(v)
+                    for k, v in self.dtype.items()
+                    if k in frame.columns
+                }
+            else:
+                self.dtype = pandas_dtype(self.dtype)
+            try:
+                frame = frame.astype(self.dtype)
+            except TypeError as e:
+                # GH#44901 reraise to keep api consistent
+                raise ValueError(e)
+        return frame
+
+    def _validate_usecols(self, usecols) -> None:
+        if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols):
+            raise ValueError(
+                "The pyarrow engine does not allow 'usecols' to be integer "
+                "column positions. Pass a list of string column names instead."
+            )
+        elif callable(usecols):
+            raise ValueError(
+                "The pyarrow engine does not allow 'usecols' to be a callable."
+            )
+
+    def read(self) -> DataFrame:
+        """
+        Reads the contents of a CSV file into a DataFrame and
+        processes it according to the kwargs passed in the
+        constructor.
+
+        Returns
+        -------
+        DataFrame
+            The DataFrame created from the CSV file.
+        """
+        pa = import_optional_dependency("pyarrow")
+        pyarrow_csv = import_optional_dependency("pyarrow.csv")
+        self._get_pyarrow_options()
+
+        try:
+            convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)
+        except TypeError:
+            include = self.convert_options.get("include_columns", None)
+            if include is not None:
+                self._validate_usecols(include)
+
+            nulls = self.convert_options.get("null_values", set())
+            if not lib.is_list_like(nulls) or not all(
+                isinstance(x, str) for x in nulls
+            ):
+                raise TypeError(
+                    "The 'pyarrow' engine requires all na_values to be strings"
+                )
+
+            raise
+
+        try:
+            table = pyarrow_csv.read_csv(
+                self.src,
+                read_options=pyarrow_csv.ReadOptions(**self.read_options),
+                parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
+                convert_options=convert_options,
+            )
+        except pa.ArrowInvalid as e:
+            raise ParserError(e) from e
+
+        dtype_backend = self.kwds["dtype_backend"]
+
+        # Convert all pa.null() cols -> float64 (non nullable)
+        # else Int64 (nullable case, see below)
+        if dtype_backend is lib.no_default:
+            new_schema = table.schema
+            new_type = pa.float64()
+            for i, arrow_type in enumerate(table.schema.types):
+                if pa.types.is_null(arrow_type):
+                    new_schema = new_schema.set(
+                        i, new_schema.field(i).with_type(new_type)
+                    )
+
+            table = table.cast(new_schema)
+
+        if dtype_backend == "pyarrow":
+            frame = table.to_pandas(types_mapper=pd.ArrowDtype)
+        elif dtype_backend == "numpy_nullable":
+            # Modify the default mapping to also
+            # map null to Int64 (to match other engines)
+            dtype_mapping = _arrow_dtype_mapping()
+            dtype_mapping[pa.null()] = pd.Int64Dtype()
+            frame = table.to_pandas(types_mapper=dtype_mapping.get)
+        elif using_pyarrow_string_dtype():
+            frame = table.to_pandas(types_mapper=arrow_string_types_mapper())
+
+        else:
+            frame = table.to_pandas()
+        return self._finalize_pandas_output(frame)
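For orientation (not part of the diff): ArrowParserWrapper above backs read_csv's pyarrow engine. A hedged sketch of how it is reached from the public API; requires pyarrow to be installed:

import io
import pandas as pd

data = io.BytesIO(b"x,y\n1,2.5\n,3.5\n")
# engine="pyarrow" routes through ArrowParserWrapper; dtype_backend="pyarrow" keeps ArrowDtype columns
df = pd.read_csv(data, engine="pyarrow", dtype_backend="pyarrow")
print(df.dtypes)   # e.g. int64[pyarrow], double[pyarrow]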
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py ADDED
@@ -0,0 +1,1448 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from copy import copy
+import csv
+import datetime
+from enum import Enum
+import itertools
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    cast,
+    final,
+    overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+    lib,
+    parsers,
+)
+import pandas._libs.ops as libops
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas._libs.tslibs import parsing
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+    ParserError,
+    ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.astype import astype_array
+from pandas.core.dtypes.common import (
+    ensure_object,
+    is_bool_dtype,
+    is_dict_like,
+    is_extension_array_dtype,
+    is_float_dtype,
+    is_integer,
+    is_integer_dtype,
+    is_list_like,
+    is_object_dtype,
+    is_scalar,
+    is_string_dtype,
+    pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+    CategoricalDtype,
+    ExtensionDtype,
+)
+from pandas.core.dtypes.missing import isna
+
+from pandas import (
+    ArrowDtype,
+    DataFrame,
+    DatetimeIndex,
+    StringDtype,
+    concat,
+)
+from pandas.core import algorithms
+from pandas.core.arrays import (
+    ArrowExtensionArray,
+    BaseMaskedArray,
+    BooleanArray,
+    Categorical,
+    ExtensionArray,
+    FloatingArray,
+    IntegerArray,
+)
+from pandas.core.arrays.boolean import BooleanDtype
74 |
+
from pandas.core.indexes.api import (
|
75 |
+
Index,
|
76 |
+
MultiIndex,
|
77 |
+
default_index,
|
78 |
+
ensure_index_from_sequences,
|
79 |
+
)
|
80 |
+
from pandas.core.series import Series
|
81 |
+
from pandas.core.tools import datetimes as tools
|
82 |
+
|
83 |
+
from pandas.io.common import is_potential_multi_index
|
84 |
+
|
85 |
+
if TYPE_CHECKING:
|
86 |
+
from collections.abc import (
|
87 |
+
Hashable,
|
88 |
+
Iterable,
|
89 |
+
Mapping,
|
90 |
+
Sequence,
|
91 |
+
)
|
92 |
+
|
93 |
+
from pandas._typing import (
|
94 |
+
ArrayLike,
|
95 |
+
DtypeArg,
|
96 |
+
DtypeObj,
|
97 |
+
Scalar,
|
98 |
+
)
|
99 |
+
|
100 |
+
|
101 |
+
class ParserBase:
|
102 |
+
class BadLineHandleMethod(Enum):
|
103 |
+
ERROR = 0
|
104 |
+
WARN = 1
|
105 |
+
SKIP = 2
|
106 |
+
|
107 |
+
_implicit_index: bool
|
108 |
+
_first_chunk: bool
|
109 |
+
keep_default_na: bool
|
110 |
+
dayfirst: bool
|
111 |
+
cache_dates: bool
|
112 |
+
keep_date_col: bool
|
113 |
+
usecols_dtype: str | None
|
114 |
+
|
115 |
+
def __init__(self, kwds) -> None:
|
116 |
+
self._implicit_index = False
|
117 |
+
|
118 |
+
self.names = kwds.get("names")
|
119 |
+
self.orig_names: Sequence[Hashable] | None = None
|
120 |
+
|
121 |
+
self.index_col = kwds.get("index_col", None)
|
122 |
+
self.unnamed_cols: set = set()
|
123 |
+
self.index_names: Sequence[Hashable] | None = None
|
124 |
+
self.col_names: Sequence[Hashable] | None = None
|
125 |
+
|
126 |
+
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
|
127 |
+
self._parse_date_cols: Iterable = []
|
128 |
+
self.date_parser = kwds.pop("date_parser", lib.no_default)
|
129 |
+
self.date_format = kwds.pop("date_format", None)
|
130 |
+
self.dayfirst = kwds.pop("dayfirst", False)
|
131 |
+
self.keep_date_col = kwds.pop("keep_date_col", False)
|
132 |
+
|
133 |
+
self.na_values = kwds.get("na_values")
|
134 |
+
self.na_fvalues = kwds.get("na_fvalues")
|
135 |
+
self.na_filter = kwds.get("na_filter", False)
|
136 |
+
self.keep_default_na = kwds.get("keep_default_na", True)
|
137 |
+
|
138 |
+
self.dtype = copy(kwds.get("dtype", None))
|
139 |
+
self.converters = kwds.get("converters")
|
140 |
+
self.dtype_backend = kwds.get("dtype_backend")
|
141 |
+
|
142 |
+
self.true_values = kwds.get("true_values")
|
143 |
+
self.false_values = kwds.get("false_values")
|
144 |
+
self.cache_dates = kwds.pop("cache_dates", True)
|
145 |
+
|
146 |
+
self._date_conv = _make_date_converter(
|
147 |
+
date_parser=self.date_parser,
|
148 |
+
date_format=self.date_format,
|
149 |
+
dayfirst=self.dayfirst,
|
150 |
+
cache_dates=self.cache_dates,
|
151 |
+
)
|
152 |
+
|
153 |
+
# validate header options for mi
|
154 |
+
self.header = kwds.get("header")
|
155 |
+
if is_list_like(self.header, allow_sets=False):
|
156 |
+
if kwds.get("usecols"):
|
157 |
+
raise ValueError(
|
158 |
+
"cannot specify usecols when specifying a multi-index header"
|
159 |
+
)
|
160 |
+
if kwds.get("names"):
|
161 |
+
raise ValueError(
|
162 |
+
"cannot specify names when specifying a multi-index header"
|
163 |
+
)
|
164 |
+
|
165 |
+
# validate index_col that only contains integers
|
166 |
+
if self.index_col is not None:
|
167 |
+
# In this case we can pin down index_col as list[int]
|
168 |
+
if is_integer(self.index_col):
|
169 |
+
self.index_col = [self.index_col]
|
170 |
+
elif not (
|
171 |
+
is_list_like(self.index_col, allow_sets=False)
|
172 |
+
and all(map(is_integer, self.index_col))
|
173 |
+
):
|
174 |
+
raise ValueError(
|
175 |
+
"index_col must only contain row numbers "
|
176 |
+
"when specifying a multi-index header"
|
177 |
+
)
|
178 |
+
else:
|
179 |
+
self.index_col = list(self.index_col)
|
180 |
+
|
181 |
+
self._name_processed = False
|
182 |
+
|
183 |
+
self._first_chunk = True
|
184 |
+
|
185 |
+
self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])
|
186 |
+
|
187 |
+
# Fallback to error to pass a sketchy test (test_override_set_noconvert_columns)
|
188 |
+
# Normally, this arg would get pre-processed earlier on
|
189 |
+
self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
|
190 |
+
|
191 |
+
def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable:
|
192 |
+
"""
|
193 |
+
Check if parse_dates are in columns.
|
194 |
+
|
195 |
+
If user has provided names for parse_dates, check if those columns
|
196 |
+
are available.
|
197 |
+
|
198 |
+
Parameters
|
199 |
+
----------
|
200 |
+
columns : list
|
201 |
+
List of names of the dataframe.
|
202 |
+
|
203 |
+
Returns
|
204 |
+
-------
|
205 |
+
The names of the columns which will get parsed later if a dict or list
|
206 |
+
is given as specification.
|
207 |
+
|
208 |
+
Raises
|
209 |
+
------
|
210 |
+
ValueError
|
211 |
+
If a column given in parse_dates is not present in the dataframe.
|
212 |
+
|
213 |
+
"""
|
214 |
+
cols_needed: Iterable
|
215 |
+
if is_dict_like(self.parse_dates):
|
216 |
+
cols_needed = itertools.chain(*self.parse_dates.values())
|
217 |
+
elif is_list_like(self.parse_dates):
|
218 |
+
# a column in parse_dates could be represented
|
219 |
+
# ColReference = Union[int, str]
|
220 |
+
# DateGroups = List[ColReference]
|
221 |
+
# ParseDates = Union[DateGroups, List[DateGroups],
|
222 |
+
# Dict[ColReference, DateGroups]]
|
223 |
+
cols_needed = itertools.chain.from_iterable(
|
224 |
+
col if is_list_like(col) and not isinstance(col, tuple) else [col]
|
225 |
+
for col in self.parse_dates
|
226 |
+
)
|
227 |
+
else:
|
228 |
+
cols_needed = []
|
229 |
+
|
230 |
+
cols_needed = list(cols_needed)
|
231 |
+
|
232 |
+
# get only columns that are references using names (str), not by index
|
233 |
+
missing_cols = ", ".join(
|
234 |
+
sorted(
|
235 |
+
{
|
236 |
+
col
|
237 |
+
for col in cols_needed
|
238 |
+
if isinstance(col, str) and col not in columns
|
239 |
+
}
|
240 |
+
)
|
241 |
+
)
|
242 |
+
if missing_cols:
|
243 |
+
raise ValueError(
|
244 |
+
f"Missing column provided to 'parse_dates': '{missing_cols}'"
|
245 |
+
)
|
246 |
+
# Convert positions to actual column names
|
247 |
+
return [
|
248 |
+
col if (isinstance(col, str) or col in columns) else columns[col]
|
249 |
+
for col in cols_needed
|
250 |
+
]
|
251 |
+
|
252 |
+
def close(self) -> None:
|
253 |
+
pass
|
254 |
+
|
255 |
+
@final
|
256 |
+
@property
|
257 |
+
def _has_complex_date_col(self) -> bool:
|
258 |
+
return isinstance(self.parse_dates, dict) or (
|
259 |
+
isinstance(self.parse_dates, list)
|
260 |
+
and len(self.parse_dates) > 0
|
261 |
+
and isinstance(self.parse_dates[0], list)
|
262 |
+
)
|
263 |
+
|
264 |
+
@final
|
265 |
+
def _should_parse_dates(self, i: int) -> bool:
|
266 |
+
if lib.is_bool(self.parse_dates):
|
267 |
+
return bool(self.parse_dates)
|
268 |
+
else:
|
269 |
+
if self.index_names is not None:
|
270 |
+
name = self.index_names[i]
|
271 |
+
else:
|
272 |
+
name = None
|
273 |
+
j = i if self.index_col is None else self.index_col[i]
|
274 |
+
|
275 |
+
return (j in self.parse_dates) or (
|
276 |
+
name is not None and name in self.parse_dates
|
277 |
+
)
|
278 |
+
|
279 |
+
@final
|
280 |
+
def _extract_multi_indexer_columns(
|
281 |
+
self,
|
282 |
+
header,
|
283 |
+
index_names: Sequence[Hashable] | None,
|
284 |
+
passed_names: bool = False,
|
285 |
+
) -> tuple[
|
286 |
+
Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
|
287 |
+
]:
|
288 |
+
"""
|
289 |
+
Extract and return the names, index_names, col_names if the column
|
290 |
+
names are a MultiIndex.
|
291 |
+
|
292 |
+
Parameters
|
293 |
+
----------
|
294 |
+
header: list of lists
|
295 |
+
The header rows
|
296 |
+
index_names: list, optional
|
297 |
+
The names of the future index
|
298 |
+
passed_names: bool, default False
|
299 |
+
A flag specifying if names were passed
|
300 |
+
|
301 |
+
"""
|
302 |
+
if len(header) < 2:
|
303 |
+
return header[0], index_names, None, passed_names
|
304 |
+
|
305 |
+
# the names are the tuples of the header that are not the index cols
|
306 |
+
# 0 is the name of the index, assuming index_col is a list of column
|
307 |
+
# numbers
|
308 |
+
ic = self.index_col
|
309 |
+
if ic is None:
|
310 |
+
ic = []
|
311 |
+
|
312 |
+
if not isinstance(ic, (list, tuple, np.ndarray)):
|
313 |
+
ic = [ic]
|
314 |
+
sic = set(ic)
|
315 |
+
|
316 |
+
# clean the index_names
|
317 |
+
index_names = header.pop(-1)
|
318 |
+
index_names, _, _ = self._clean_index_names(index_names, self.index_col)
|
319 |
+
|
320 |
+
# extract the columns
|
321 |
+
field_count = len(header[0])
|
322 |
+
|
323 |
+
# check if header lengths are equal
|
324 |
+
if not all(len(header_iter) == field_count for header_iter in header[1:]):
|
325 |
+
raise ParserError("Header rows must have an equal number of columns.")
|
326 |
+
|
327 |
+
def extract(r):
|
328 |
+
return tuple(r[i] for i in range(field_count) if i not in sic)
|
329 |
+
|
330 |
+
columns = list(zip(*(extract(r) for r in header)))
|
331 |
+
names = columns.copy()
|
332 |
+
for single_ic in sorted(ic):
|
333 |
+
names.insert(single_ic, single_ic)
|
334 |
+
|
335 |
+
# Clean the column names (if we have an index_col).
|
336 |
+
if len(ic):
|
337 |
+
col_names = [
|
338 |
+
r[ic[0]]
|
339 |
+
if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
|
340 |
+
else None
|
341 |
+
for r in header
|
342 |
+
]
|
343 |
+
else:
|
344 |
+
col_names = [None] * len(header)
|
345 |
+
|
346 |
+
passed_names = True
|
347 |
+
|
348 |
+
return names, index_names, col_names, passed_names
|
349 |
+
|
350 |
+
@final
|
351 |
+
def _maybe_make_multi_index_columns(
|
352 |
+
self,
|
353 |
+
columns: Sequence[Hashable],
|
354 |
+
col_names: Sequence[Hashable] | None = None,
|
355 |
+
) -> Sequence[Hashable] | MultiIndex:
|
356 |
+
# possibly create a column mi here
|
357 |
+
if is_potential_multi_index(columns):
|
358 |
+
list_columns = cast(list[tuple], columns)
|
359 |
+
return MultiIndex.from_tuples(list_columns, names=col_names)
|
360 |
+
return columns
|
361 |
+
|
362 |
+
@final
|
363 |
+
def _make_index(
|
364 |
+
self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
|
365 |
+
) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
|
366 |
+
index: Index | None
|
367 |
+
if not is_index_col(self.index_col) or not self.index_col:
|
368 |
+
index = None
|
369 |
+
|
370 |
+
elif not self._has_complex_date_col:
|
371 |
+
simple_index = self._get_simple_index(alldata, columns)
|
372 |
+
index = self._agg_index(simple_index)
|
373 |
+
elif self._has_complex_date_col:
|
374 |
+
if not self._name_processed:
|
375 |
+
(self.index_names, _, self.index_col) = self._clean_index_names(
|
376 |
+
list(columns), self.index_col
|
377 |
+
)
|
378 |
+
self._name_processed = True
|
379 |
+
date_index = self._get_complex_date_index(data, columns)
|
380 |
+
index = self._agg_index(date_index, try_parse_dates=False)
|
381 |
+
|
382 |
+
# add names for the index
|
383 |
+
if indexnamerow:
|
384 |
+
coffset = len(indexnamerow) - len(columns)
|
385 |
+
assert index is not None
|
386 |
+
index = index.set_names(indexnamerow[:coffset])
|
387 |
+
|
388 |
+
# maybe create a mi on the columns
|
389 |
+
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
|
390 |
+
|
391 |
+
return index, columns
|
392 |
+
|
393 |
+
@final
|
394 |
+
def _get_simple_index(self, data, columns):
|
395 |
+
def ix(col):
|
396 |
+
if not isinstance(col, str):
|
397 |
+
return col
|
398 |
+
raise ValueError(f"Index {col} invalid")
|
399 |
+
|
400 |
+
to_remove = []
|
401 |
+
index = []
|
402 |
+
for idx in self.index_col:
|
403 |
+
i = ix(idx)
|
404 |
+
to_remove.append(i)
|
405 |
+
index.append(data[i])
|
406 |
+
|
407 |
+
# remove index items from content and columns, don't pop in
|
408 |
+
# loop
|
409 |
+
for i in sorted(to_remove, reverse=True):
|
410 |
+
data.pop(i)
|
411 |
+
if not self._implicit_index:
|
412 |
+
columns.pop(i)
|
413 |
+
|
414 |
+
return index
|
415 |
+
|
416 |
+
@final
|
417 |
+
def _get_complex_date_index(self, data, col_names):
|
418 |
+
def _get_name(icol):
|
419 |
+
if isinstance(icol, str):
|
420 |
+
return icol
|
421 |
+
|
422 |
+
if col_names is None:
|
423 |
+
raise ValueError(f"Must supply column order to use {icol!s} as index")
|
424 |
+
|
425 |
+
for i, c in enumerate(col_names):
|
426 |
+
if i == icol:
|
427 |
+
return c
|
428 |
+
|
429 |
+
to_remove = []
|
430 |
+
index = []
|
431 |
+
for idx in self.index_col:
|
432 |
+
name = _get_name(idx)
|
433 |
+
to_remove.append(name)
|
434 |
+
index.append(data[name])
|
435 |
+
|
436 |
+
# remove index items from content and columns, don't pop in
|
437 |
+
# loop
|
438 |
+
for c in sorted(to_remove, reverse=True):
|
439 |
+
data.pop(c)
|
440 |
+
col_names.remove(c)
|
441 |
+
|
442 |
+
return index
|
443 |
+
|
444 |
+
@final
|
445 |
+
def _clean_mapping(self, mapping):
|
446 |
+
"""converts col numbers to names"""
|
447 |
+
if not isinstance(mapping, dict):
|
448 |
+
return mapping
|
449 |
+
clean = {}
|
450 |
+
# for mypy
|
451 |
+
assert self.orig_names is not None
|
452 |
+
|
453 |
+
for col, v in mapping.items():
|
454 |
+
if isinstance(col, int) and col not in self.orig_names:
|
455 |
+
col = self.orig_names[col]
|
456 |
+
clean[col] = v
|
457 |
+
if isinstance(mapping, defaultdict):
|
458 |
+
remaining_cols = set(self.orig_names) - set(clean.keys())
|
459 |
+
clean.update({col: mapping[col] for col in remaining_cols})
|
460 |
+
return clean
|
461 |
+
|
462 |
+
@final
|
463 |
+
def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
|
464 |
+
arrays = []
|
465 |
+
converters = self._clean_mapping(self.converters)
|
466 |
+
|
467 |
+
for i, arr in enumerate(index):
|
468 |
+
if try_parse_dates and self._should_parse_dates(i):
|
469 |
+
arr = self._date_conv(
|
470 |
+
arr,
|
471 |
+
col=self.index_names[i] if self.index_names is not None else None,
|
472 |
+
)
|
473 |
+
|
474 |
+
if self.na_filter:
|
475 |
+
col_na_values = self.na_values
|
476 |
+
col_na_fvalues = self.na_fvalues
|
477 |
+
else:
|
478 |
+
col_na_values = set()
|
479 |
+
col_na_fvalues = set()
|
480 |
+
|
481 |
+
if isinstance(self.na_values, dict):
|
482 |
+
assert self.index_names is not None
|
483 |
+
col_name = self.index_names[i]
|
484 |
+
if col_name is not None:
|
485 |
+
col_na_values, col_na_fvalues = _get_na_values(
|
486 |
+
col_name, self.na_values, self.na_fvalues, self.keep_default_na
|
487 |
+
)
|
488 |
+
|
489 |
+
clean_dtypes = self._clean_mapping(self.dtype)
|
490 |
+
|
491 |
+
cast_type = None
|
492 |
+
index_converter = False
|
493 |
+
if self.index_names is not None:
|
494 |
+
if isinstance(clean_dtypes, dict):
|
495 |
+
cast_type = clean_dtypes.get(self.index_names[i], None)
|
496 |
+
|
497 |
+
if isinstance(converters, dict):
|
498 |
+
index_converter = converters.get(self.index_names[i]) is not None
|
499 |
+
|
500 |
+
try_num_bool = not (
|
501 |
+
cast_type and is_string_dtype(cast_type) or index_converter
|
502 |
+
)
|
503 |
+
|
504 |
+
arr, _ = self._infer_types(
|
505 |
+
arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool
|
506 |
+
)
|
507 |
+
arrays.append(arr)
|
508 |
+
|
509 |
+
names = self.index_names
|
510 |
+
index = ensure_index_from_sequences(arrays, names)
|
511 |
+
|
512 |
+
return index
|
513 |
+
|
514 |
+
@final
|
515 |
+
def _convert_to_ndarrays(
|
516 |
+
self,
|
517 |
+
dct: Mapping,
|
518 |
+
na_values,
|
519 |
+
na_fvalues,
|
520 |
+
verbose: bool = False,
|
521 |
+
converters=None,
|
522 |
+
dtypes=None,
|
523 |
+
):
|
524 |
+
result = {}
|
525 |
+
for c, values in dct.items():
|
526 |
+
conv_f = None if converters is None else converters.get(c, None)
|
527 |
+
if isinstance(dtypes, dict):
|
528 |
+
cast_type = dtypes.get(c, None)
|
529 |
+
else:
|
530 |
+
# single dtype or None
|
531 |
+
cast_type = dtypes
|
532 |
+
|
533 |
+
if self.na_filter:
|
534 |
+
col_na_values, col_na_fvalues = _get_na_values(
|
535 |
+
c, na_values, na_fvalues, self.keep_default_na
|
536 |
+
)
|
537 |
+
else:
|
538 |
+
col_na_values, col_na_fvalues = set(), set()
|
539 |
+
|
540 |
+
if c in self._parse_date_cols:
|
541 |
+
# GH#26203 Do not convert columns which get converted to dates
|
542 |
+
# but replace nans to ensure to_datetime works
|
543 |
+
mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)
|
544 |
+
np.putmask(values, mask, np.nan)
|
545 |
+
result[c] = values
|
546 |
+
continue
|
547 |
+
|
548 |
+
if conv_f is not None:
|
549 |
+
# conv_f applied to data before inference
|
550 |
+
if cast_type is not None:
|
551 |
+
warnings.warn(
|
552 |
+
(
|
553 |
+
"Both a converter and dtype were specified "
|
554 |
+
f"for column {c} - only the converter will be used."
|
555 |
+
),
|
556 |
+
ParserWarning,
|
557 |
+
stacklevel=find_stack_level(),
|
558 |
+
)
|
559 |
+
|
560 |
+
try:
|
561 |
+
values = lib.map_infer(values, conv_f)
|
562 |
+
except ValueError:
|
563 |
+
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
|
564 |
+
values = lib.map_infer_mask(values, conv_f, mask)
|
565 |
+
|
566 |
+
cvals, na_count = self._infer_types(
|
567 |
+
values,
|
568 |
+
set(col_na_values) | col_na_fvalues,
|
569 |
+
cast_type is None,
|
570 |
+
try_num_bool=False,
|
571 |
+
)
|
572 |
+
else:
|
573 |
+
is_ea = is_extension_array_dtype(cast_type)
|
574 |
+
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
|
575 |
+
# skip inference if specified dtype is object
|
576 |
+
# or casting to an EA
|
577 |
+
try_num_bool = not (cast_type and is_str_or_ea_dtype)
|
578 |
+
|
579 |
+
# general type inference and conversion
|
580 |
+
cvals, na_count = self._infer_types(
|
581 |
+
values,
|
582 |
+
set(col_na_values) | col_na_fvalues,
|
583 |
+
cast_type is None,
|
584 |
+
try_num_bool,
|
585 |
+
)
|
586 |
+
|
587 |
+
# type specified in dtype param or cast_type is an EA
|
588 |
+
if cast_type is not None:
|
589 |
+
cast_type = pandas_dtype(cast_type)
|
590 |
+
if cast_type and (cvals.dtype != cast_type or is_ea):
|
591 |
+
if not is_ea and na_count > 0:
|
592 |
+
if is_bool_dtype(cast_type):
|
593 |
+
raise ValueError(f"Bool column has NA values in column {c}")
|
594 |
+
cvals = self._cast_types(cvals, cast_type, c)
|
595 |
+
|
596 |
+
result[c] = cvals
|
597 |
+
if verbose and na_count:
|
598 |
+
print(f"Filled {na_count} NA values in column {c!s}")
|
599 |
+
return result
|
600 |
+
|
601 |
+
@final
|
602 |
+
def _set_noconvert_dtype_columns(
|
603 |
+
self, col_indices: list[int], names: Sequence[Hashable]
|
604 |
+
) -> set[int]:
|
605 |
+
"""
|
606 |
+
Set the columns that should not undergo dtype conversions.
|
607 |
+
|
608 |
+
Currently, any column that is involved with date parsing will not
|
609 |
+
undergo such conversions. If usecols is specified, the positions of the columns
|
610 |
+
not to cast are relative to usecols, not to all columns.
|
611 |
+
|
612 |
+
Parameters
|
613 |
+
----------
|
614 |
+
col_indices: The indices specifying order and positions of the columns
|
615 |
+
names: The column names, whose order corresponds to the order
|
616 |
+
of col_indices
|
617 |
+
|
618 |
+
Returns
|
619 |
+
-------
|
620 |
+
A set of integers containing the positions of the columns not to convert.
|
621 |
+
"""
|
622 |
+
usecols: list[int] | list[str] | None
|
623 |
+
noconvert_columns = set()
|
624 |
+
if self.usecols_dtype == "integer":
|
625 |
+
# A set of integers will be converted to a list in
|
626 |
+
# the correct order every single time.
|
627 |
+
usecols = sorted(self.usecols)
|
628 |
+
elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
|
629 |
+
# The names attribute should have the correct columns
|
630 |
+
# in the proper order for indexing with parse_dates.
|
631 |
+
usecols = col_indices
|
632 |
+
else:
|
633 |
+
# Usecols is empty.
|
634 |
+
usecols = None
|
635 |
+
|
636 |
+
def _set(x) -> int:
|
637 |
+
if usecols is not None and is_integer(x):
|
638 |
+
x = usecols[x]
|
639 |
+
|
640 |
+
if not is_integer(x):
|
641 |
+
x = col_indices[names.index(x)]
|
642 |
+
|
643 |
+
return x
|
644 |
+
|
645 |
+
if isinstance(self.parse_dates, list):
|
646 |
+
for val in self.parse_dates:
|
647 |
+
if isinstance(val, list):
|
648 |
+
for k in val:
|
649 |
+
noconvert_columns.add(_set(k))
|
650 |
+
else:
|
651 |
+
noconvert_columns.add(_set(val))
|
652 |
+
|
653 |
+
elif isinstance(self.parse_dates, dict):
|
654 |
+
for val in self.parse_dates.values():
|
655 |
+
if isinstance(val, list):
|
656 |
+
for k in val:
|
657 |
+
noconvert_columns.add(_set(k))
|
658 |
+
else:
|
659 |
+
noconvert_columns.add(_set(val))
|
660 |
+
|
661 |
+
elif self.parse_dates:
|
662 |
+
if isinstance(self.index_col, list):
|
663 |
+
for k in self.index_col:
|
664 |
+
noconvert_columns.add(_set(k))
|
665 |
+
elif self.index_col is not None:
|
666 |
+
noconvert_columns.add(_set(self.index_col))
|
667 |
+
|
668 |
+
return noconvert_columns
|
669 |
+
|
670 |
+
@final
|
671 |
+
def _infer_types(
|
672 |
+
self, values, na_values, no_dtype_specified, try_num_bool: bool = True
|
673 |
+
) -> tuple[ArrayLike, int]:
|
674 |
+
"""
|
675 |
+
Infer types of values, possibly casting
|
676 |
+
|
677 |
+
Parameters
|
678 |
+
----------
|
679 |
+
values : ndarray
|
680 |
+
na_values : set
|
681 |
+
no_dtype_specified : bool, True if no dtype was explicitly specified for the values
|
682 |
+
try_num_bool : bool, default True
|
683 |
+
try to cast values to numeric (first preference) or boolean
|
684 |
+
|
685 |
+
Returns
|
686 |
+
-------
|
687 |
+
converted : ndarray or ExtensionArray
|
688 |
+
na_count : int
|
689 |
+
"""
|
690 |
+
na_count = 0
|
691 |
+
if issubclass(values.dtype.type, (np.number, np.bool_)):
|
692 |
+
# If our array has numeric dtype, we don't have to check for strings in isin
|
693 |
+
na_values = np.array([val for val in na_values if not isinstance(val, str)])
|
694 |
+
mask = algorithms.isin(values, na_values)
|
695 |
+
na_count = mask.astype("uint8", copy=False).sum()
|
696 |
+
if na_count > 0:
|
697 |
+
if is_integer_dtype(values):
|
698 |
+
values = values.astype(np.float64)
|
699 |
+
np.putmask(values, mask, np.nan)
|
700 |
+
return values, na_count
|
701 |
+
|
702 |
+
dtype_backend = self.dtype_backend
|
703 |
+
non_default_dtype_backend = (
|
704 |
+
no_dtype_specified and dtype_backend is not lib.no_default
|
705 |
+
)
|
706 |
+
result: ArrayLike
|
707 |
+
|
708 |
+
if try_num_bool and is_object_dtype(values.dtype):
|
709 |
+
# exclude e.g DatetimeIndex here
|
710 |
+
try:
|
711 |
+
result, result_mask = lib.maybe_convert_numeric(
|
712 |
+
values,
|
713 |
+
na_values,
|
714 |
+
False,
|
715 |
+
convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
|
716 |
+
)
|
717 |
+
except (ValueError, TypeError):
|
718 |
+
# e.g. encountering datetime string gets ValueError
|
719 |
+
# TypeError can be raised in floatify
|
720 |
+
na_count = parsers.sanitize_objects(values, na_values)
|
721 |
+
result = values
|
722 |
+
else:
|
723 |
+
if non_default_dtype_backend:
|
724 |
+
if result_mask is None:
|
725 |
+
result_mask = np.zeros(result.shape, dtype=np.bool_)
|
726 |
+
|
727 |
+
if result_mask.all():
|
728 |
+
result = IntegerArray(
|
729 |
+
np.ones(result_mask.shape, dtype=np.int64), result_mask
|
730 |
+
)
|
731 |
+
elif is_integer_dtype(result):
|
732 |
+
result = IntegerArray(result, result_mask)
|
733 |
+
elif is_bool_dtype(result):
|
734 |
+
result = BooleanArray(result, result_mask)
|
735 |
+
elif is_float_dtype(result):
|
736 |
+
result = FloatingArray(result, result_mask)
|
737 |
+
|
738 |
+
na_count = result_mask.sum()
|
739 |
+
else:
|
740 |
+
na_count = isna(result).sum()
|
741 |
+
else:
|
742 |
+
result = values
|
743 |
+
if values.dtype == np.object_:
|
744 |
+
na_count = parsers.sanitize_objects(values, na_values)
|
745 |
+
|
746 |
+
if result.dtype == np.object_ and try_num_bool:
|
747 |
+
result, bool_mask = libops.maybe_convert_bool(
|
748 |
+
np.asarray(values),
|
749 |
+
true_values=self.true_values,
|
750 |
+
false_values=self.false_values,
|
751 |
+
convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
|
752 |
+
)
|
753 |
+
if result.dtype == np.bool_ and non_default_dtype_backend:
|
754 |
+
if bool_mask is None:
|
755 |
+
bool_mask = np.zeros(result.shape, dtype=np.bool_)
|
756 |
+
result = BooleanArray(result, bool_mask)
|
757 |
+
elif result.dtype == np.object_ and non_default_dtype_backend:
|
758 |
+
# read_excel sends array of datetime objects
|
759 |
+
if not lib.is_datetime_array(result, skipna=True):
|
760 |
+
dtype = StringDtype()
|
761 |
+
cls = dtype.construct_array_type()
|
762 |
+
result = cls._from_sequence(values, dtype=dtype)
|
763 |
+
|
764 |
+
if dtype_backend == "pyarrow":
|
765 |
+
pa = import_optional_dependency("pyarrow")
|
766 |
+
if isinstance(result, np.ndarray):
|
767 |
+
result = ArrowExtensionArray(pa.array(result, from_pandas=True))
|
768 |
+
elif isinstance(result, BaseMaskedArray):
|
769 |
+
if result._mask.all():
|
770 |
+
# We want an arrow null array here
|
771 |
+
result = ArrowExtensionArray(pa.array([None] * len(result)))
|
772 |
+
else:
|
773 |
+
result = ArrowExtensionArray(
|
774 |
+
pa.array(result._data, mask=result._mask)
|
775 |
+
)
|
776 |
+
else:
|
777 |
+
result = ArrowExtensionArray(
|
778 |
+
pa.array(result.to_numpy(), from_pandas=True)
|
779 |
+
)
|
780 |
+
|
781 |
+
return result, na_count
|
782 |
+
|
783 |
+
@final
|
784 |
+
def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
|
785 |
+
"""
|
786 |
+
Cast values to specified type
|
787 |
+
|
788 |
+
Parameters
|
789 |
+
----------
|
790 |
+
values : ndarray or ExtensionArray
|
791 |
+
cast_type : np.dtype or ExtensionDtype
|
792 |
+
dtype to cast values to
|
793 |
+
column : string
|
794 |
+
column name - used only for error reporting
|
795 |
+
|
796 |
+
Returns
|
797 |
+
-------
|
798 |
+
converted : ndarray or ExtensionArray
|
799 |
+
"""
|
800 |
+
if isinstance(cast_type, CategoricalDtype):
|
801 |
+
known_cats = cast_type.categories is not None
|
802 |
+
|
803 |
+
if not is_object_dtype(values.dtype) and not known_cats:
|
804 |
+
# TODO: this is for consistency with
|
805 |
+
# c-parser which parses all categories
|
806 |
+
# as strings
|
807 |
+
values = lib.ensure_string_array(
|
808 |
+
values, skipna=False, convert_na_value=False
|
809 |
+
)
|
810 |
+
|
811 |
+
cats = Index(values).unique().dropna()
|
812 |
+
values = Categorical._from_inferred_categories(
|
813 |
+
cats, cats.get_indexer(values), cast_type, true_values=self.true_values
|
814 |
+
)
|
815 |
+
|
816 |
+
# use the EA's implementation of casting
|
817 |
+
elif isinstance(cast_type, ExtensionDtype):
|
818 |
+
array_type = cast_type.construct_array_type()
|
819 |
+
try:
|
820 |
+
if isinstance(cast_type, BooleanDtype):
|
821 |
+
# error: Unexpected keyword argument "true_values" for
|
822 |
+
# "_from_sequence_of_strings" of "ExtensionArray"
|
823 |
+
return array_type._from_sequence_of_strings( # type: ignore[call-arg]
|
824 |
+
values,
|
825 |
+
dtype=cast_type,
|
826 |
+
true_values=self.true_values,
|
827 |
+
false_values=self.false_values,
|
828 |
+
)
|
829 |
+
else:
|
830 |
+
return array_type._from_sequence_of_strings(values, dtype=cast_type)
|
831 |
+
except NotImplementedError as err:
|
832 |
+
raise NotImplementedError(
|
833 |
+
f"Extension Array: {array_type} must implement "
|
834 |
+
"_from_sequence_of_strings in order to be used in parser methods"
|
835 |
+
) from err
|
836 |
+
|
837 |
+
elif isinstance(values, ExtensionArray):
|
838 |
+
values = values.astype(cast_type, copy=False)
|
839 |
+
elif issubclass(cast_type.type, str):
|
840 |
+
# TODO: why skipna=True here and False above? some tests depend
|
841 |
+
# on it here, but nothing fails if we change it above
|
842 |
+
# (as no tests get there as of 2022-12-06)
|
843 |
+
values = lib.ensure_string_array(
|
844 |
+
values, skipna=True, convert_na_value=False
|
845 |
+
)
|
846 |
+
else:
|
847 |
+
try:
|
848 |
+
values = astype_array(values, cast_type, copy=True)
|
849 |
+
except ValueError as err:
|
850 |
+
raise ValueError(
|
851 |
+
f"Unable to convert column {column} to type {cast_type}"
|
852 |
+
) from err
|
853 |
+
return values
|
854 |
+
|
855 |
+
@overload
|
856 |
+
def _do_date_conversions(
|
857 |
+
self,
|
858 |
+
names: Index,
|
859 |
+
data: DataFrame,
|
860 |
+
) -> tuple[Sequence[Hashable] | Index, DataFrame]:
|
861 |
+
...
|
862 |
+
|
863 |
+
@overload
|
864 |
+
def _do_date_conversions(
|
865 |
+
self,
|
866 |
+
names: Sequence[Hashable],
|
867 |
+
data: Mapping[Hashable, ArrayLike],
|
868 |
+
) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:
|
869 |
+
...
|
870 |
+
|
871 |
+
@final
|
872 |
+
def _do_date_conversions(
|
873 |
+
self,
|
874 |
+
names: Sequence[Hashable] | Index,
|
875 |
+
data: Mapping[Hashable, ArrayLike] | DataFrame,
|
876 |
+
) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]:
|
877 |
+
# returns data, columns
|
878 |
+
|
879 |
+
if self.parse_dates is not None:
|
880 |
+
data, names = _process_date_conversion(
|
881 |
+
data,
|
882 |
+
self._date_conv,
|
883 |
+
self.parse_dates,
|
884 |
+
self.index_col,
|
885 |
+
self.index_names,
|
886 |
+
names,
|
887 |
+
keep_date_col=self.keep_date_col,
|
888 |
+
dtype_backend=self.dtype_backend,
|
889 |
+
)
|
890 |
+
|
891 |
+
return names, data
|
892 |
+
|
893 |
+
@final
|
894 |
+
def _check_data_length(
|
895 |
+
self,
|
896 |
+
columns: Sequence[Hashable],
|
897 |
+
data: Sequence[ArrayLike],
|
898 |
+
) -> None:
|
899 |
+
"""Checks if length of data is equal to length of column names.
|
900 |
+
|
901 |
+
One set of trailing commas is allowed. When self.index_col is not False,
|
902 |
+
a length mismatch instead raises a ParserError earlier in parsing.
|
903 |
+
|
904 |
+
Parameters
|
905 |
+
----------
|
906 |
+
columns: list of column names
|
907 |
+
data: list of array-likes containing the data column-wise.
|
908 |
+
"""
|
909 |
+
if not self.index_col and len(columns) != len(data) and columns:
|
910 |
+
empty_str = is_object_dtype(data[-1]) and data[-1] == ""
|
911 |
+
# error: No overload variant of "__ror__" of "ndarray" matches
|
912 |
+
# argument type "ExtensionArray"
|
913 |
+
empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]
|
914 |
+
if len(columns) == len(data) - 1 and np.all(empty_str_or_na):
|
915 |
+
return
|
916 |
+
warnings.warn(
|
917 |
+
"Length of header or names does not match length of data. This leads "
|
918 |
+
"to a loss of data with index_col=False.",
|
919 |
+
ParserWarning,
|
920 |
+
stacklevel=find_stack_level(),
|
921 |
+
)
|
922 |
+
|
923 |
+
@overload
|
924 |
+
def _evaluate_usecols(
|
925 |
+
self,
|
926 |
+
usecols: set[int] | Callable[[Hashable], object],
|
927 |
+
names: Sequence[Hashable],
|
928 |
+
) -> set[int]:
|
929 |
+
...
|
930 |
+
|
931 |
+
@overload
|
932 |
+
def _evaluate_usecols(
|
933 |
+
self, usecols: set[str], names: Sequence[Hashable]
|
934 |
+
) -> set[str]:
|
935 |
+
...
|
936 |
+
|
937 |
+
@final
|
938 |
+
def _evaluate_usecols(
|
939 |
+
self,
|
940 |
+
usecols: Callable[[Hashable], object] | set[str] | set[int],
|
941 |
+
names: Sequence[Hashable],
|
942 |
+
) -> set[str] | set[int]:
|
943 |
+
"""
|
944 |
+
Check whether or not the 'usecols' parameter
|
945 |
+
is a callable. If so, enumerates the 'names'
|
946 |
+
parameter and returns a set of indices for
|
947 |
+
each entry in 'names' that evaluates to True.
|
948 |
+
If not a callable, returns 'usecols'.
|
949 |
+
"""
|
950 |
+
if callable(usecols):
|
951 |
+
return {i for i, name in enumerate(names) if usecols(name)}
|
952 |
+
return usecols
|
953 |
+
|
954 |
+
@final
|
955 |
+
def _validate_usecols_names(self, usecols, names: Sequence):
|
956 |
+
"""
|
957 |
+
Validates that all usecols are present in a given
|
958 |
+
list of names. If not, raise a ValueError that
|
959 |
+
shows what usecols are missing.
|
960 |
+
|
961 |
+
Parameters
|
962 |
+
----------
|
963 |
+
usecols : iterable of usecols
|
964 |
+
The columns to validate are present in names.
|
965 |
+
names : iterable of names
|
966 |
+
The column names to check against.
|
967 |
+
|
968 |
+
Returns
|
969 |
+
-------
|
970 |
+
usecols : iterable of usecols
|
971 |
+
The `usecols` parameter if the validation succeeds.
|
972 |
+
|
973 |
+
Raises
|
974 |
+
------
|
975 |
+
ValueError : Columns were missing. Error message will list them.
|
976 |
+
"""
|
977 |
+
missing = [c for c in usecols if c not in names]
|
978 |
+
if len(missing) > 0:
|
979 |
+
raise ValueError(
|
980 |
+
f"Usecols do not match columns, columns expected but not found: "
|
981 |
+
f"{missing}"
|
982 |
+
)
|
983 |
+
|
984 |
+
return usecols
|
985 |
+
|
986 |
+
@final
|
987 |
+
def _validate_usecols_arg(self, usecols):
|
988 |
+
"""
|
989 |
+
Validate the 'usecols' parameter.
|
990 |
+
|
991 |
+
Checks whether or not the 'usecols' parameter contains all integers
|
992 |
+
(column selection by index), strings (column by name) or is a callable.
|
993 |
+
Raises a ValueError if that is not the case.
|
994 |
+
|
995 |
+
Parameters
|
996 |
+
----------
|
997 |
+
usecols : list-like, callable, or None
|
998 |
+
List of columns to use when parsing or a callable that can be used
|
999 |
+
to filter a list of table columns.
|
1000 |
+
|
1001 |
+
Returns
|
1002 |
+
-------
|
1003 |
+
usecols_tuple : tuple
|
1004 |
+
A tuple of (verified_usecols, usecols_dtype).
|
1005 |
+
|
1006 |
+
'verified_usecols' is either a set if an array-like is passed in or
|
1007 |
+
'usecols' if a callable or None is passed in.
|
1008 |
+
|
1009 |
+
'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
|
1010 |
+
is passed in or None if a callable or None is passed in.
|
1011 |
+
"""
|
1012 |
+
msg = (
|
1013 |
+
"'usecols' must either be list-like of all strings, all unicode, "
|
1014 |
+
"all integers or a callable."
|
1015 |
+
)
|
1016 |
+
if usecols is not None:
|
1017 |
+
if callable(usecols):
|
1018 |
+
return usecols, None
|
1019 |
+
|
1020 |
+
if not is_list_like(usecols):
|
1021 |
+
# see gh-20529
|
1022 |
+
#
|
1023 |
+
# Ensure it is iterable container but not string.
|
1024 |
+
raise ValueError(msg)
|
1025 |
+
|
1026 |
+
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
|
1027 |
+
|
1028 |
+
if usecols_dtype not in ("empty", "integer", "string"):
|
1029 |
+
raise ValueError(msg)
|
1030 |
+
|
1031 |
+
usecols = set(usecols)
|
1032 |
+
|
1033 |
+
return usecols, usecols_dtype
|
1034 |
+
return usecols, None
|
1035 |
+
|
1036 |
+
@final
|
1037 |
+
def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]:
|
1038 |
+
if not is_index_col(index_col):
|
1039 |
+
return None, columns, index_col
|
1040 |
+
|
1041 |
+
columns = list(columns)
|
1042 |
+
|
1043 |
+
# In case of no rows and multiindex columns we have to set index_names to
|
1044 |
+
# list of Nones GH#38292
|
1045 |
+
if not columns:
|
1046 |
+
return [None] * len(index_col), columns, index_col
|
1047 |
+
|
1048 |
+
cp_cols = list(columns)
|
1049 |
+
index_names: list[str | int | None] = []
|
1050 |
+
|
1051 |
+
# don't mutate
|
1052 |
+
index_col = list(index_col)
|
1053 |
+
|
1054 |
+
for i, c in enumerate(index_col):
|
1055 |
+
if isinstance(c, str):
|
1056 |
+
index_names.append(c)
|
1057 |
+
for j, name in enumerate(cp_cols):
|
1058 |
+
if name == c:
|
1059 |
+
index_col[i] = j
|
1060 |
+
columns.remove(name)
|
1061 |
+
break
|
1062 |
+
else:
|
1063 |
+
name = cp_cols[c]
|
1064 |
+
columns.remove(name)
|
1065 |
+
index_names.append(name)
|
1066 |
+
|
1067 |
+
# Only clean index names that were placeholders.
|
1068 |
+
for i, name in enumerate(index_names):
|
1069 |
+
if isinstance(name, str) and name in self.unnamed_cols:
|
1070 |
+
index_names[i] = None
|
1071 |
+
|
1072 |
+
return index_names, columns, index_col
|
1073 |
+
|
1074 |
+
@final
|
1075 |
+
def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):
|
1076 |
+
columns = list(columns)
|
1077 |
+
|
1078 |
+
index_col = self.index_col
|
1079 |
+
index_names = self.index_names
|
1080 |
+
|
1081 |
+
# Convert `dtype` to a defaultdict of some kind.
|
1082 |
+
# This will enable us to write `dtype[col_name]`
|
1083 |
+
# without worrying about KeyError issues later on.
|
1084 |
+
dtype_dict: defaultdict[Hashable, Any]
|
1085 |
+
if not is_dict_like(dtype):
|
1086 |
+
# if dtype == None, default will be object.
|
1087 |
+
default_dtype = dtype or object
|
1088 |
+
dtype_dict = defaultdict(lambda: default_dtype)
|
1089 |
+
else:
|
1090 |
+
dtype = cast(dict, dtype)
|
1091 |
+
dtype_dict = defaultdict(
|
1092 |
+
lambda: object,
|
1093 |
+
{columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
|
1094 |
+
)
|
1095 |
+
|
1096 |
+
# Even though we have no data, the "index" of the empty DataFrame
|
1097 |
+
# could for example still be an empty MultiIndex. Thus, we need to
|
1098 |
+
# check whether we have any index columns specified, via either:
|
1099 |
+
#
|
1100 |
+
# 1) index_col (column indices)
|
1101 |
+
# 2) index_names (column names)
|
1102 |
+
#
|
1103 |
+
# Both must be non-null to ensure a successful construction. Otherwise,
|
1104 |
+
# we have to create a generic empty Index.
|
1105 |
+
index: Index
|
1106 |
+
if (index_col is None or index_col is False) or index_names is None:
|
1107 |
+
index = default_index(0)
|
1108 |
+
else:
|
1109 |
+
data = [Series([], dtype=dtype_dict[name]) for name in index_names]
|
1110 |
+
index = ensure_index_from_sequences(data, names=index_names)
|
1111 |
+
index_col.sort()
|
1112 |
+
|
1113 |
+
for i, n in enumerate(index_col):
|
1114 |
+
columns.pop(n - i)
|
1115 |
+
|
1116 |
+
col_dict = {
|
1117 |
+
col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
|
1118 |
+
}
|
1119 |
+
|
1120 |
+
return index, columns, col_dict
|
1121 |
+
|
1122 |
+
|
1123 |
+
def _make_date_converter(
|
1124 |
+
date_parser=lib.no_default,
|
1125 |
+
dayfirst: bool = False,
|
1126 |
+
cache_dates: bool = True,
|
1127 |
+
date_format: dict[Hashable, str] | str | None = None,
|
1128 |
+
):
|
1129 |
+
if date_parser is not lib.no_default:
|
1130 |
+
warnings.warn(
|
1131 |
+
"The argument 'date_parser' is deprecated and will "
|
1132 |
+
"be removed in a future version. "
|
1133 |
+
"Please use 'date_format' instead, or read your data in as 'object' dtype "
|
1134 |
+
"and then call 'to_datetime'.",
|
1135 |
+
FutureWarning,
|
1136 |
+
stacklevel=find_stack_level(),
|
1137 |
+
)
|
1138 |
+
if date_parser is not lib.no_default and date_format is not None:
|
1139 |
+
raise TypeError("Cannot use both 'date_parser' and 'date_format'")
|
1140 |
+
|
1141 |
+
def unpack_if_single_element(arg):
|
1142 |
+
# NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
|
1143 |
+
if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1:
|
1144 |
+
return arg[0]
|
1145 |
+
return arg
|
1146 |
+
|
1147 |
+
def converter(*date_cols, col: Hashable):
|
1148 |
+
if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm":
|
1149 |
+
return date_cols[0]
|
1150 |
+
|
1151 |
+
if date_parser is lib.no_default:
|
1152 |
+
strs = parsing.concat_date_cols(date_cols)
|
1153 |
+
date_fmt = (
|
1154 |
+
date_format.get(col) if isinstance(date_format, dict) else date_format
|
1155 |
+
)
|
1156 |
+
|
1157 |
+
with warnings.catch_warnings():
|
1158 |
+
warnings.filterwarnings(
|
1159 |
+
"ignore",
|
1160 |
+
".*parsing datetimes with mixed time zones will raise an error",
|
1161 |
+
category=FutureWarning,
|
1162 |
+
)
|
1163 |
+
str_objs = ensure_object(strs)
|
1164 |
+
try:
|
1165 |
+
result = tools.to_datetime(
|
1166 |
+
str_objs,
|
1167 |
+
format=date_fmt,
|
1168 |
+
utc=False,
|
1169 |
+
dayfirst=dayfirst,
|
1170 |
+
cache=cache_dates,
|
1171 |
+
)
|
1172 |
+
except (ValueError, TypeError):
|
1173 |
+
# test_usecols_with_parse_dates4
|
1174 |
+
return str_objs
|
1175 |
+
|
1176 |
+
if isinstance(result, DatetimeIndex):
|
1177 |
+
arr = result.to_numpy()
|
1178 |
+
arr.flags.writeable = True
|
1179 |
+
return arr
|
1180 |
+
return result._values
|
1181 |
+
else:
|
1182 |
+
try:
|
1183 |
+
with warnings.catch_warnings():
|
1184 |
+
warnings.filterwarnings(
|
1185 |
+
"ignore",
|
1186 |
+
".*parsing datetimes with mixed time zones "
|
1187 |
+
"will raise an error",
|
1188 |
+
category=FutureWarning,
|
1189 |
+
)
|
1190 |
+
pre_parsed = date_parser(
|
1191 |
+
*(unpack_if_single_element(arg) for arg in date_cols)
|
1192 |
+
)
|
1193 |
+
try:
|
1194 |
+
result = tools.to_datetime(
|
1195 |
+
pre_parsed,
|
1196 |
+
cache=cache_dates,
|
1197 |
+
)
|
1198 |
+
except (ValueError, TypeError):
|
1199 |
+
# test_read_csv_with_custom_date_parser
|
1200 |
+
result = pre_parsed
|
1201 |
+
if isinstance(result, datetime.datetime):
|
1202 |
+
raise Exception("scalar parser")
|
1203 |
+
return result
|
1204 |
+
except Exception:
|
1205 |
+
# e.g. test_datetime_fractional_seconds
|
1206 |
+
with warnings.catch_warnings():
|
1207 |
+
warnings.filterwarnings(
|
1208 |
+
"ignore",
|
1209 |
+
".*parsing datetimes with mixed time zones "
|
1210 |
+
"will raise an error",
|
1211 |
+
category=FutureWarning,
|
1212 |
+
)
|
1213 |
+
pre_parsed = parsing.try_parse_dates(
|
1214 |
+
parsing.concat_date_cols(date_cols),
|
1215 |
+
parser=date_parser,
|
1216 |
+
)
|
1217 |
+
try:
|
1218 |
+
return tools.to_datetime(pre_parsed)
|
1219 |
+
except (ValueError, TypeError):
|
1220 |
+
# TODO: not reached in tests 2023-10-27; needed?
|
1221 |
+
return pre_parsed
|
1222 |
+
|
1223 |
+
return converter
|
1224 |
+
|
1225 |
+
|
1226 |
+
parser_defaults = {
|
1227 |
+
"delimiter": None,
|
1228 |
+
"escapechar": None,
|
1229 |
+
"quotechar": '"',
|
1230 |
+
"quoting": csv.QUOTE_MINIMAL,
|
1231 |
+
"doublequote": True,
|
1232 |
+
"skipinitialspace": False,
|
1233 |
+
"lineterminator": None,
|
1234 |
+
"header": "infer",
|
1235 |
+
"index_col": None,
|
1236 |
+
"names": None,
|
1237 |
+
"skiprows": None,
|
1238 |
+
"skipfooter": 0,
|
1239 |
+
"nrows": None,
|
1240 |
+
"na_values": None,
|
1241 |
+
"keep_default_na": True,
|
1242 |
+
"true_values": None,
|
1243 |
+
"false_values": None,
|
1244 |
+
"converters": None,
|
1245 |
+
"dtype": None,
|
1246 |
+
"cache_dates": True,
|
1247 |
+
"thousands": None,
|
1248 |
+
"comment": None,
|
1249 |
+
"decimal": ".",
|
1250 |
+
# 'engine': 'c',
|
1251 |
+
"parse_dates": False,
|
1252 |
+
"keep_date_col": False,
|
1253 |
+
"dayfirst": False,
|
1254 |
+
"date_parser": lib.no_default,
|
1255 |
+
"date_format": None,
|
1256 |
+
"usecols": None,
|
1257 |
+
# 'iterator': False,
|
1258 |
+
"chunksize": None,
|
1259 |
+
"verbose": False,
|
1260 |
+
"encoding": None,
|
1261 |
+
"compression": None,
|
1262 |
+
"skip_blank_lines": True,
|
1263 |
+
"encoding_errors": "strict",
|
1264 |
+
"on_bad_lines": ParserBase.BadLineHandleMethod.ERROR,
|
1265 |
+
"dtype_backend": lib.no_default,
|
1266 |
+
}
|
1267 |
+
|
1268 |
+
|
1269 |
+
def _process_date_conversion(
|
1270 |
+
data_dict,
|
1271 |
+
converter: Callable,
|
1272 |
+
parse_spec,
|
1273 |
+
index_col,
|
1274 |
+
index_names,
|
1275 |
+
columns,
|
1276 |
+
keep_date_col: bool = False,
|
1277 |
+
dtype_backend=lib.no_default,
|
1278 |
+
):
|
1279 |
+
def _isindex(colspec):
|
1280 |
+
return (isinstance(index_col, list) and colspec in index_col) or (
|
1281 |
+
isinstance(index_names, list) and colspec in index_names
|
1282 |
+
)
|
1283 |
+
|
1284 |
+
new_cols = []
|
1285 |
+
new_data = {}
|
1286 |
+
|
1287 |
+
orig_names = columns
|
1288 |
+
columns = list(columns)
|
1289 |
+
|
1290 |
+
date_cols = set()
|
1291 |
+
|
1292 |
+
if parse_spec is None or isinstance(parse_spec, bool):
|
1293 |
+
return data_dict, columns
|
1294 |
+
|
1295 |
+
if isinstance(parse_spec, list):
|
1296 |
+
# list of column lists
|
1297 |
+
for colspec in parse_spec:
|
1298 |
+
if is_scalar(colspec) or isinstance(colspec, tuple):
|
1299 |
+
if isinstance(colspec, int) and colspec not in data_dict:
|
1300 |
+
colspec = orig_names[colspec]
|
1301 |
+
if _isindex(colspec):
|
1302 |
+
continue
|
1303 |
+
elif dtype_backend == "pyarrow":
|
1304 |
+
import pyarrow as pa
|
1305 |
+
|
1306 |
+
dtype = data_dict[colspec].dtype
|
1307 |
+
if isinstance(dtype, ArrowDtype) and (
|
1308 |
+
pa.types.is_timestamp(dtype.pyarrow_dtype)
|
1309 |
+
or pa.types.is_date(dtype.pyarrow_dtype)
|
1310 |
+
):
|
1311 |
+
continue
|
1312 |
+
|
1313 |
+
# Pyarrow engine returns Series which we need to convert to
|
1314 |
+
# numpy array before converter; it's a no-op for other parsers
|
1315 |
+
data_dict[colspec] = converter(
|
1316 |
+
np.asarray(data_dict[colspec]), col=colspec
|
1317 |
+
)
|
1318 |
+
else:
|
1319 |
+
new_name, col, old_names = _try_convert_dates(
|
1320 |
+
converter, colspec, data_dict, orig_names
|
1321 |
+
)
|
1322 |
+
if new_name in data_dict:
|
1323 |
+
raise ValueError(f"New date column already in dict {new_name}")
|
1324 |
+
new_data[new_name] = col
|
1325 |
+
new_cols.append(new_name)
|
1326 |
+
date_cols.update(old_names)
|
1327 |
+
|
1328 |
+
elif isinstance(parse_spec, dict):
|
1329 |
+
# dict of new name to column list
|
1330 |
+
for new_name, colspec in parse_spec.items():
|
1331 |
+
if new_name in data_dict:
|
1332 |
+
raise ValueError(f"Date column {new_name} already in dict")
|
1333 |
+
|
1334 |
+
_, col, old_names = _try_convert_dates(
|
1335 |
+
converter,
|
1336 |
+
colspec,
|
1337 |
+
data_dict,
|
1338 |
+
orig_names,
|
1339 |
+
target_name=new_name,
|
1340 |
+
)
|
1341 |
+
|
1342 |
+
new_data[new_name] = col
|
1343 |
+
|
1344 |
+
# If original column can be converted to date we keep the converted values
|
1345 |
+
# This can only happen if values are from single column
|
1346 |
+
if len(colspec) == 1:
|
1347 |
+
new_data[colspec[0]] = col
|
1348 |
+
|
1349 |
+
new_cols.append(new_name)
|
1350 |
+
date_cols.update(old_names)
|
1351 |
+
|
1352 |
+
if isinstance(data_dict, DataFrame):
|
1353 |
+
data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False)
|
1354 |
+
else:
|
1355 |
+
data_dict.update(new_data)
|
1356 |
+
new_cols.extend(columns)
|
1357 |
+
|
1358 |
+
if not keep_date_col:
|
1359 |
+
for c in list(date_cols):
|
1360 |
+
data_dict.pop(c)
|
1361 |
+
new_cols.remove(c)
|
1362 |
+
|
1363 |
+
return data_dict, new_cols
|
1364 |
+
|
1365 |
+
|
1366 |
+
def _try_convert_dates(
|
1367 |
+
parser: Callable, colspec, data_dict, columns, target_name: str | None = None
|
1368 |
+
):
|
1369 |
+
colset = set(columns)
|
1370 |
+
colnames = []
|
1371 |
+
|
1372 |
+
for c in colspec:
|
1373 |
+
if c in colset:
|
1374 |
+
colnames.append(c)
|
1375 |
+
elif isinstance(c, int) and c not in columns:
|
1376 |
+
colnames.append(columns[c])
|
1377 |
+
else:
|
1378 |
+
colnames.append(c)
|
1379 |
+
|
1380 |
+
new_name: tuple | str
|
1381 |
+
if all(isinstance(x, tuple) for x in colnames):
|
1382 |
+
new_name = tuple(map("_".join, zip(*colnames)))
|
1383 |
+
else:
|
1384 |
+
new_name = "_".join([str(x) for x in colnames])
|
1385 |
+
to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
|
1386 |
+
|
1387 |
+
new_col = parser(*to_parse, col=new_name if target_name is None else target_name)
|
1388 |
+
return new_name, new_col, colnames
|
1389 |
+
|
1390 |
+
|
1391 |
+
def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool):
|
1392 |
+
"""
|
1393 |
+
Get the NaN values for a given column.
|
1394 |
+
|
1395 |
+
Parameters
|
1396 |
+
----------
|
1397 |
+
col : str
|
1398 |
+
The name of the column.
|
1399 |
+
na_values : array-like, dict
|
1400 |
+
The object listing the NaN values as strings.
|
1401 |
+
na_fvalues : array-like, dict
|
1402 |
+
The object listing the NaN values as floats.
|
1403 |
+
keep_default_na : bool
|
1404 |
+
If `na_values` is a dict, and the column is not mapped in the
|
1405 |
+
dictionary, whether to return the default NaN values or the empty set.
|
1406 |
+
|
1407 |
+
Returns
|
1408 |
+
-------
|
1409 |
+
nan_tuple : A length-two tuple composed of
|
1410 |
+
|
1411 |
+
1) na_values : the string NaN values for that column.
|
1412 |
+
2) na_fvalues : the float NaN values for that column.
|
1413 |
+
"""
|
1414 |
+
if isinstance(na_values, dict):
|
1415 |
+
if col in na_values:
|
1416 |
+
return na_values[col], na_fvalues[col]
|
1417 |
+
else:
|
1418 |
+
if keep_default_na:
|
1419 |
+
return STR_NA_VALUES, set()
|
1420 |
+
|
1421 |
+
return set(), set()
|
1422 |
+
else:
|
1423 |
+
return na_values, na_fvalues
|
1424 |
+
|
1425 |
+
|
1426 |
+
def _validate_parse_dates_arg(parse_dates):
|
1427 |
+
"""
|
1428 |
+
Check whether or not the 'parse_dates' parameter
|
1429 |
+
is a non-boolean scalar. Raises a TypeError if
|
1430 |
+
that is the case.
|
1431 |
+
"""
|
1432 |
+
msg = (
|
1433 |
+
"Only booleans, lists, and dictionaries are accepted "
|
1434 |
+
"for the 'parse_dates' parameter"
|
1435 |
+
)
|
1436 |
+
|
1437 |
+
if not (
|
1438 |
+
parse_dates is None
|
1439 |
+
or lib.is_bool(parse_dates)
|
1440 |
+
or isinstance(parse_dates, (list, dict))
|
1441 |
+
):
|
1442 |
+
raise TypeError(msg)
|
1443 |
+
|
1444 |
+
return parse_dates
|
1445 |
+
|
1446 |
+
|
1447 |
+
def is_index_col(col) -> bool:
|
1448 |
+
return col is not None and col is not False
|
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py
ADDED
@@ -0,0 +1,410 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections import defaultdict
|
4 |
+
from typing import TYPE_CHECKING
|
5 |
+
import warnings
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
from pandas._libs import (
|
10 |
+
lib,
|
11 |
+
parsers,
|
12 |
+
)
|
13 |
+
from pandas.compat._optional import import_optional_dependency
|
14 |
+
from pandas.errors import DtypeWarning
|
15 |
+
from pandas.util._exceptions import find_stack_level
|
16 |
+
|
17 |
+
from pandas.core.dtypes.common import pandas_dtype
|
18 |
+
from pandas.core.dtypes.concat import (
|
19 |
+
concat_compat,
|
20 |
+
union_categoricals,
|
21 |
+
)
|
22 |
+
from pandas.core.dtypes.dtypes import CategoricalDtype
|
23 |
+
|
24 |
+
from pandas.core.indexes.api import ensure_index_from_sequences
|
25 |
+
|
26 |
+
from pandas.io.common import (
|
27 |
+
dedup_names,
|
28 |
+
is_potential_multi_index,
|
29 |
+
)
|
30 |
+
from pandas.io.parsers.base_parser import (
|
31 |
+
ParserBase,
|
32 |
+
ParserError,
|
33 |
+
is_index_col,
|
34 |
+
)
|
35 |
+
|
36 |
+
if TYPE_CHECKING:
|
37 |
+
from collections.abc import (
|
38 |
+
Hashable,
|
39 |
+
Mapping,
|
40 |
+
Sequence,
|
41 |
+
)
|
42 |
+
|
43 |
+
from pandas._typing import (
|
44 |
+
ArrayLike,
|
45 |
+
DtypeArg,
|
46 |
+
DtypeObj,
|
47 |
+
ReadCsvBuffer,
|
48 |
+
)
|
49 |
+
|
50 |
+
from pandas import (
|
51 |
+
Index,
|
52 |
+
MultiIndex,
|
53 |
+
)
|
54 |
+
|
55 |
+
|
56 |
+
class CParserWrapper(ParserBase):
|
57 |
+
low_memory: bool
|
58 |
+
_reader: parsers.TextReader
|
59 |
+
|
60 |
+
def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
|
61 |
+
super().__init__(kwds)
|
62 |
+
self.kwds = kwds
|
63 |
+
kwds = kwds.copy()
|
64 |
+
|
65 |
+
self.low_memory = kwds.pop("low_memory", False)
|
66 |
+
|
67 |
+
# #2442
|
68 |
+
# error: Cannot determine type of 'index_col'
|
69 |
+
kwds["allow_leading_cols"] = (
|
70 |
+
self.index_col is not False # type: ignore[has-type]
|
71 |
+
)
|
72 |
+
|
73 |
+
# GH20529, validate usecol arg before TextReader
|
74 |
+
kwds["usecols"] = self.usecols
|
75 |
+
|
76 |
+
# Have to pass int, would break tests using TextReader directly otherwise :(
|
77 |
+
kwds["on_bad_lines"] = self.on_bad_lines.value
|
78 |
+
|
79 |
+
for key in (
|
80 |
+
"storage_options",
|
81 |
+
"encoding",
|
82 |
+
"memory_map",
|
83 |
+
"compression",
|
84 |
+
):
|
85 |
+
kwds.pop(key, None)
|
86 |
+
|
87 |
+
kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
|
88 |
+
if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:
|
89 |
+
kwds["dtype_backend"] = "numpy"
|
90 |
+
if kwds["dtype_backend"] == "pyarrow":
|
91 |
+
# Fail here loudly instead of in cython after reading
|
92 |
+
import_optional_dependency("pyarrow")
|
93 |
+
self._reader = parsers.TextReader(src, **kwds)
|
94 |
+
|
95 |
+
self.unnamed_cols = self._reader.unnamed_cols
|
96 |
+
|
97 |
+
# error: Cannot determine type of 'names'
|
98 |
+
passed_names = self.names is None # type: ignore[has-type]
|
99 |
+
|
100 |
+
if self._reader.header is None:
|
101 |
+
self.names = None
|
102 |
+
else:
|
103 |
+
# error: Cannot determine type of 'names'
|
104 |
+
# error: Cannot determine type of 'index_names'
|
105 |
+
(
|
106 |
+
self.names, # type: ignore[has-type]
|
107 |
+
self.index_names,
|
108 |
+
self.col_names,
|
109 |
+
passed_names,
|
110 |
+
) = self._extract_multi_indexer_columns(
|
111 |
+
self._reader.header,
|
112 |
+
self.index_names, # type: ignore[has-type]
|
113 |
+
passed_names,
|
114 |
+
)
|
115 |
+
|
116 |
+
# error: Cannot determine type of 'names'
|
117 |
+
if self.names is None: # type: ignore[has-type]
|
118 |
+
self.names = list(range(self._reader.table_width))
|
119 |
+
|
120 |
+
# gh-9755
|
121 |
+
#
|
122 |
+
# need to set orig_names here first
|
123 |
+
# so that proper indexing can be done
|
124 |
+
# with _set_noconvert_columns
|
125 |
+
#
|
126 |
+
# once names has been filtered, we will
|
127 |
+
# then set orig_names again to names
|
128 |
+
# error: Cannot determine type of 'names'
|
129 |
+
self.orig_names = self.names[:] # type: ignore[has-type]
|
130 |
+
|
131 |
+
if self.usecols:
|
132 |
+
usecols = self._evaluate_usecols(self.usecols, self.orig_names)
|
133 |
+
|
134 |
+
# GH 14671
|
135 |
+
# assert for mypy, orig_names is List or None, None would error in issubset
|
136 |
+
assert self.orig_names is not None
|
137 |
+
if self.usecols_dtype == "string" and not set(usecols).issubset(
|
138 |
+
self.orig_names
|
139 |
+
):
|
140 |
+
self._validate_usecols_names(usecols, self.orig_names)
|
141 |
+
|
142 |
+
# error: Cannot determine type of 'names'
|
143 |
+
if len(self.names) > len(usecols): # type: ignore[has-type]
|
144 |
+
# error: Cannot determine type of 'names'
|
145 |
+
self.names = [ # type: ignore[has-type]
|
146 |
+
n
|
147 |
+
# error: Cannot determine type of 'names'
|
148 |
+
for i, n in enumerate(self.names) # type: ignore[has-type]
|
149 |
+
if (i in usecols or n in usecols)
|
150 |
+
]
|
151 |
+
|
152 |
+
# error: Cannot determine type of 'names'
|
153 |
+
if len(self.names) < len(usecols): # type: ignore[has-type]
|
154 |
+
# error: Cannot determine type of 'names'
|
155 |
+
self._validate_usecols_names(
|
156 |
+
usecols,
|
157 |
+
self.names, # type: ignore[has-type]
|
158 |
+
)
|
159 |
+
|
160 |
+
# error: Cannot determine type of 'names'
|
161 |
+
self._validate_parse_dates_presence(self.names) # type: ignore[has-type]
|
162 |
+
self._set_noconvert_columns()
|
163 |
+
|
164 |
+
# error: Cannot determine type of 'names'
|
165 |
+
self.orig_names = self.names # type: ignore[has-type]
|
166 |
+
|
167 |
+
if not self._has_complex_date_col:
|
168 |
+
# error: Cannot determine type of 'index_col'
|
169 |
+
if self._reader.leading_cols == 0 and is_index_col(
|
170 |
+
self.index_col # type: ignore[has-type]
|
171 |
+
):
|
172 |
+
self._name_processed = True
|
173 |
+
(
|
174 |
+
index_names,
|
175 |
+
# error: Cannot determine type of 'names'
|
176 |
+
self.names, # type: ignore[has-type]
|
177 |
+
self.index_col,
|
178 |
+
) = self._clean_index_names(
|
179 |
+
# error: Cannot determine type of 'names'
|
180 |
+
self.names, # type: ignore[has-type]
|
181 |
+
# error: Cannot determine type of 'index_col'
|
182 |
+
self.index_col, # type: ignore[has-type]
|
183 |
+
)
|
184 |
+
|
185 |
+
if self.index_names is None:
|
186 |
+
self.index_names = index_names
|
187 |
+
|
188 |
+
if self._reader.header is None and not passed_names:
|
189 |
+
assert self.index_names is not None
|
190 |
+
self.index_names = [None] * len(self.index_names)
|
191 |
+
|
192 |
+
self._implicit_index = self._reader.leading_cols > 0
|
193 |
+
|
194 |
+
def close(self) -> None:
|
195 |
+
# close handles opened by C parser
|
196 |
+
try:
|
197 |
+
self._reader.close()
|
198 |
+
except ValueError:
|
199 |
+
pass
|
200 |
+
|
201 |
+
def _set_noconvert_columns(self) -> None:
|
202 |
+
"""
|
203 |
+
Set the columns that should not undergo dtype conversions.
|
204 |
+
|
205 |
+
Currently, any column that is involved with date parsing will not
|
206 |
+
undergo such conversions.
|
207 |
+
"""
|
208 |
+
assert self.orig_names is not None
|
209 |
+
# error: Cannot determine type of 'names'
|
210 |
+
|
211 |
+
# much faster than using orig_names.index(x) xref GH#44106
|
212 |
+
names_dict = {x: i for i, x in enumerate(self.orig_names)}
|
213 |
+
col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
|
214 |
+
# error: Cannot determine type of 'names'
|
215 |
+
noconvert_columns = self._set_noconvert_dtype_columns(
|
216 |
+
col_indices,
|
217 |
+
self.names, # type: ignore[has-type]
|
218 |
+
)
|
219 |
+
for col in noconvert_columns:
|
220 |
+
self._reader.set_noconvert(col)
|
221 |
+
|
222 |
+
def read(
|
223 |
+
self,
|
224 |
+
nrows: int | None = None,
|
225 |
+
) -> tuple[
|
226 |
+
Index | MultiIndex | None,
|
227 |
+
Sequence[Hashable] | MultiIndex,
|
228 |
+
Mapping[Hashable, ArrayLike],
|
229 |
+
]:
|
230 |
+
index: Index | MultiIndex | None
|
231 |
+
column_names: Sequence[Hashable] | MultiIndex
|
232 |
+
try:
|
233 |
+
if self.low_memory:
|
234 |
+
chunks = self._reader.read_low_memory(nrows)
|
235 |
+
# destructive to chunks
|
236 |
+
data = _concatenate_chunks(chunks)
|
237 |
+
|
238 |
+
else:
|
239 |
+
data = self._reader.read(nrows)
|
240 |
+
except StopIteration:
|
241 |
+
if self._first_chunk:
|
242 |
+
self._first_chunk = False
|
243 |
+
names = dedup_names(
|
244 |
+
self.orig_names,
|
245 |
+
is_potential_multi_index(self.orig_names, self.index_col),
|
246 |
+
)
|
247 |
+
index, columns, col_dict = self._get_empty_meta(
|
248 |
+
names,
|
249 |
+
dtype=self.dtype,
|
250 |
+
)
|
251 |
+
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
|
252 |
+
|
253 |
+
if self.usecols is not None:
|
254 |
+
columns = self._filter_usecols(columns)
|
255 |
+
|
256 |
+
col_dict = {k: v for k, v in col_dict.items() if k in columns}
|
257 |
+
|
258 |
+
return index, columns, col_dict
|
259 |
+
|
260 |
+
else:
|
261 |
+
self.close()
|
262 |
+
raise
|
263 |
+
|
264 |
+
# Done with first read, next time raise StopIteration
|
265 |
+
self._first_chunk = False
|
266 |
+
|
267 |
+
# error: Cannot determine type of 'names'
|
268 |
+
names = self.names # type: ignore[has-type]
|
269 |
+
|
270 |
+
if self._reader.leading_cols:
|
271 |
+
if self._has_complex_date_col:
|
272 |
+
raise NotImplementedError("file structure not yet supported")
|
273 |
+
|
274 |
+
# implicit index, no index names
|
275 |
+
arrays = []
|
276 |
+
|
277 |
+
if self.index_col and self._reader.leading_cols != len(self.index_col):
|
278 |
+
raise ParserError(
|
279 |
+
"Could not construct index. Requested to use "
|
280 |
+
f"{len(self.index_col)} number of columns, but "
|
281 |
+
f"{self._reader.leading_cols} left to parse."
|
282 |
+
)
|
283 |
+
|
284 |
+
for i in range(self._reader.leading_cols):
|
285 |
+
if self.index_col is None:
|
286 |
+
values = data.pop(i)
|
287 |
+
else:
|
288 |
+
values = data.pop(self.index_col[i])
|
289 |
+
|
290 |
+
values = self._maybe_parse_dates(values, i, try_parse_dates=True)
|
291 |
+
arrays.append(values)
|
292 |
+
|
293 |
+
index = ensure_index_from_sequences(arrays)
|
294 |
+
|
295 |
+
if self.usecols is not None:
|
296 |
+
names = self._filter_usecols(names)
|
297 |
+
|
298 |
+
names = dedup_names(names, is_potential_multi_index(names, self.index_col))
|
299 |
+
|
300 |
+
# rename dict keys
|
301 |
+
data_tups = sorted(data.items())
|
302 |
+
data = {k: v for k, (i, v) in zip(names, data_tups)}
|
303 |
+
|
304 |
+
column_names, date_data = self._do_date_conversions(names, data)
|
305 |
+
|
306 |
+
# maybe create a mi on the columns
|
307 |
+
column_names = self._maybe_make_multi_index_columns(
|
308 |
+
column_names, self.col_names
|
309 |
+
)
|
310 |
+
|
311 |
+
else:
|
312 |
+
# rename dict keys
|
313 |
+
data_tups = sorted(data.items())
|
314 |
+
|
315 |
+
# ugh, mutation
|
316 |
+
|
317 |
+
# assert for mypy, orig_names is List or None, None would error in list(...)
|
318 |
+
assert self.orig_names is not None
|
319 |
+
names = list(self.orig_names)
|
320 |
+
names = dedup_names(names, is_potential_multi_index(names, self.index_col))
|
321 |
+
|
322 |
+
if self.usecols is not None:
|
323 |
+
names = self._filter_usecols(names)
|
324 |
+
|
325 |
+
# columns as list
|
326 |
+
alldata = [x[1] for x in data_tups]
|
327 |
+
if self.usecols is None:
|
328 |
+
self._check_data_length(names, alldata)
|
329 |
+
|
330 |
+
data = {k: v for k, (i, v) in zip(names, data_tups)}
|
331 |
+
|
332 |
+
names, date_data = self._do_date_conversions(names, data)
|
333 |
+
index, column_names = self._make_index(date_data, alldata, names)
|
334 |
+
|
335 |
+
return index, column_names, date_data
|
336 |
+
|
337 |
+
def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
|
338 |
+
# hackish
|
339 |
+
usecols = self._evaluate_usecols(self.usecols, names)
|
340 |
+
if usecols is not None and len(names) != len(usecols):
|
341 |
+
names = [
|
342 |
+
name for i, name in enumerate(names) if i in usecols or name in usecols
|
343 |
+
]
|
344 |
+
return names
|
345 |
+
|
346 |
+
def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
|
347 |
+
if try_parse_dates and self._should_parse_dates(index):
|
348 |
+
values = self._date_conv(
|
349 |
+
values,
|
350 |
+
col=self.index_names[index] if self.index_names is not None else None,
|
351 |
+
)
|
352 |
+
return values
|
353 |
+
|
354 |
+
|
355 |
+
def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
|
356 |
+
"""
|
357 |
+
Concatenate chunks of data read with low_memory=True.
|
358 |
+
|
359 |
+
The tricky part is handling Categoricals, where different chunks
|
360 |
+
may have different inferred categories.
|
361 |
+
"""
|
362 |
+
names = list(chunks[0].keys())
|
363 |
+
warning_columns = []
|
364 |
+
|
365 |
+
result: dict = {}
|
366 |
+
for name in names:
|
367 |
+
arrs = [chunk.pop(name) for chunk in chunks]
|
368 |
+
# Check each arr for consistent types.
|
369 |
+
dtypes = {a.dtype for a in arrs}
|
370 |
+
non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)}
|
371 |
+
|
372 |
+
dtype = dtypes.pop()
|
373 |
+
if isinstance(dtype, CategoricalDtype):
|
374 |
+
result[name] = union_categoricals(arrs, sort_categories=False)
|
375 |
+
else:
|
376 |
+
result[name] = concat_compat(arrs)
|
377 |
+
if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):
|
378 |
+
warning_columns.append(str(name))
|
379 |
+
|
380 |
+
if warning_columns:
|
381 |
+
warning_names = ",".join(warning_columns)
|
382 |
+
warning_message = " ".join(
|
383 |
+
[
|
384 |
+
f"Columns ({warning_names}) have mixed types. "
|
385 |
+
f"Specify dtype option on import or set low_memory=False."
|
386 |
+
]
|
387 |
+
)
|
388 |
+
warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
|
389 |
+
return result
|
390 |
+
|
391 |
+
|
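A note on the chunk concatenation above: with low_memory=True the C reader hands the file back in pieces, and categorical columns in different pieces can end up with different inferred categories, so they are merged with union_categoricals rather than a plain concatenation. Below is a minimal sketch of that merge using only the public pandas.api.types helper; the column values are invented for illustration.

import pandas as pd
from pandas.api.types import union_categoricals

# Two chunks of the same column, each with its own inferred categories.
chunk_a = pd.Categorical(["low", "high"])    # inferred categories: ['high', 'low']
chunk_b = pd.Categorical(["medium", "low"])  # inferred categories: ['low', 'medium']

# union_categoricals merges the category sets instead of failing or silently
# coercing to object, which is what _concatenate_chunks relies on.
merged = union_categoricals([chunk_a, chunk_b], sort_categories=False)
print(list(merged))             # ['low', 'high', 'medium', 'low']
print(list(merged.categories))  # ['high', 'low', 'medium']
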
392 |
+
def ensure_dtype_objs(
|
393 |
+
dtype: DtypeArg | dict[Hashable, DtypeArg] | None
|
394 |
+
) -> DtypeObj | dict[Hashable, DtypeObj] | None:
|
395 |
+
"""
|
396 |
+
Ensure we have either None, a dtype object, or a dictionary mapping to
|
397 |
+
dtype objects.
|
398 |
+
"""
|
399 |
+
if isinstance(dtype, defaultdict):
|
400 |
+
# "None" not callable [misc]
|
401 |
+
default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc]
|
402 |
+
dtype_converted: defaultdict = defaultdict(lambda: default_dtype)
|
403 |
+
for key in dtype.keys():
|
404 |
+
dtype_converted[key] = pandas_dtype(dtype[key])
|
405 |
+
return dtype_converted
|
406 |
+
elif isinstance(dtype, dict):
|
407 |
+
return {k: pandas_dtype(dtype[k]) for k in dtype}
|
408 |
+
elif dtype is not None:
|
409 |
+
return pandas_dtype(dtype)
|
410 |
+
return dtype
|
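For reference, the defaultdict branch of ensure_dtype_objs above is what lets read_csv accept a defaultdict as its dtype argument: the default factory supplies the dtype for any column not listed explicitly. A small sketch of the same normalization using the public pandas.api.types.pandas_dtype follows; the column names are illustrative only.

from collections import defaultdict
import numpy as np
from pandas.api.types import pandas_dtype

# A dtype spec as a user might pass it: strings and numpy types mixed.
dtype_spec = defaultdict(lambda: "float64", {"id": "int64", "flag": np.bool_})

# Normalize every entry (and the default) to a proper dtype object,
# mirroring what ensure_dtype_objs does before handing off to the C reader.
default_dtype = pandas_dtype(dtype_spec.default_factory())
normalized = defaultdict(lambda: default_dtype)
for key in dtype_spec:
    normalized[key] = pandas_dtype(dtype_spec[key])

print(normalized["id"])       # int64
print(normalized["missing"])  # float64 (falls back to the default factory)
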
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py
ADDED
@@ -0,0 +1,1387 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections import (
|
4 |
+
abc,
|
5 |
+
defaultdict,
|
6 |
+
)
|
7 |
+
from collections.abc import (
|
8 |
+
Hashable,
|
9 |
+
Iterator,
|
10 |
+
Mapping,
|
11 |
+
Sequence,
|
12 |
+
)
|
13 |
+
import csv
|
14 |
+
from io import StringIO
|
15 |
+
import re
|
16 |
+
from typing import (
|
17 |
+
IO,
|
18 |
+
TYPE_CHECKING,
|
19 |
+
DefaultDict,
|
20 |
+
Literal,
|
21 |
+
cast,
|
22 |
+
)
|
23 |
+
import warnings
|
24 |
+
|
25 |
+
import numpy as np
|
26 |
+
|
27 |
+
from pandas._libs import lib
|
28 |
+
from pandas.errors import (
|
29 |
+
EmptyDataError,
|
30 |
+
ParserError,
|
31 |
+
ParserWarning,
|
32 |
+
)
|
33 |
+
from pandas.util._decorators import cache_readonly
|
34 |
+
from pandas.util._exceptions import find_stack_level
|
35 |
+
|
36 |
+
from pandas.core.dtypes.common import (
|
37 |
+
is_bool_dtype,
|
38 |
+
is_integer,
|
39 |
+
is_numeric_dtype,
|
40 |
+
)
|
41 |
+
from pandas.core.dtypes.inference import is_dict_like
|
42 |
+
|
43 |
+
from pandas.io.common import (
|
44 |
+
dedup_names,
|
45 |
+
is_potential_multi_index,
|
46 |
+
)
|
47 |
+
from pandas.io.parsers.base_parser import (
|
48 |
+
ParserBase,
|
49 |
+
parser_defaults,
|
50 |
+
)
|
51 |
+
|
52 |
+
if TYPE_CHECKING:
|
53 |
+
from pandas._typing import (
|
54 |
+
ArrayLike,
|
55 |
+
ReadCsvBuffer,
|
56 |
+
Scalar,
|
57 |
+
)
|
58 |
+
|
59 |
+
from pandas import (
|
60 |
+
Index,
|
61 |
+
MultiIndex,
|
62 |
+
)
|
63 |
+
|
64 |
+
# BOM character (byte order mark)
|
65 |
+
# This exists at the beginning of a file to indicate endianness
|
66 |
+
# of a file (stream). Unfortunately, this marker screws up parsing,
|
67 |
+
# so we need to remove it if we see it.
|
68 |
+
_BOM = "\ufeff"
|
69 |
+
|
70 |
+
|
71 |
+
class PythonParser(ParserBase):
|
72 |
+
_no_thousands_columns: set[int]
|
73 |
+
|
74 |
+
def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:
|
75 |
+
"""
|
76 |
+
Workhorse function for processing nested list into DataFrame
|
77 |
+
"""
|
78 |
+
super().__init__(kwds)
|
79 |
+
|
80 |
+
self.data: Iterator[str] | None = None
|
81 |
+
self.buf: list = []
|
82 |
+
self.pos = 0
|
83 |
+
self.line_pos = 0
|
84 |
+
|
85 |
+
self.skiprows = kwds["skiprows"]
|
86 |
+
|
87 |
+
if callable(self.skiprows):
|
88 |
+
self.skipfunc = self.skiprows
|
89 |
+
else:
|
90 |
+
self.skipfunc = lambda x: x in self.skiprows
|
91 |
+
|
92 |
+
self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
|
93 |
+
self.delimiter = kwds["delimiter"]
|
94 |
+
|
95 |
+
self.quotechar = kwds["quotechar"]
|
96 |
+
if isinstance(self.quotechar, str):
|
97 |
+
self.quotechar = str(self.quotechar)
|
98 |
+
|
99 |
+
self.escapechar = kwds["escapechar"]
|
100 |
+
self.doublequote = kwds["doublequote"]
|
101 |
+
self.skipinitialspace = kwds["skipinitialspace"]
|
102 |
+
self.lineterminator = kwds["lineterminator"]
|
103 |
+
self.quoting = kwds["quoting"]
|
104 |
+
self.skip_blank_lines = kwds["skip_blank_lines"]
|
105 |
+
|
106 |
+
self.has_index_names = False
|
107 |
+
if "has_index_names" in kwds:
|
108 |
+
self.has_index_names = kwds["has_index_names"]
|
109 |
+
|
110 |
+
self.verbose = kwds["verbose"]
|
111 |
+
|
112 |
+
self.thousands = kwds["thousands"]
|
113 |
+
self.decimal = kwds["decimal"]
|
114 |
+
|
115 |
+
self.comment = kwds["comment"]
|
116 |
+
|
117 |
+
# Set self.data to something that can read lines.
|
118 |
+
if isinstance(f, list):
|
119 |
+
# read_excel: f is a list
|
120 |
+
self.data = cast(Iterator[str], f)
|
121 |
+
else:
|
122 |
+
assert hasattr(f, "readline")
|
123 |
+
self.data = self._make_reader(f)
|
124 |
+
|
125 |
+
# Get columns in two steps: infer from data, then
|
126 |
+
# infer column indices from self.usecols if it is specified.
|
127 |
+
self._col_indices: list[int] | None = None
|
128 |
+
columns: list[list[Scalar | None]]
|
129 |
+
(
|
130 |
+
columns,
|
131 |
+
self.num_original_columns,
|
132 |
+
self.unnamed_cols,
|
133 |
+
) = self._infer_columns()
|
134 |
+
|
135 |
+
# Now self.columns has the set of columns that we will process.
|
136 |
+
# The original set is stored in self.original_columns.
|
137 |
+
# error: Cannot determine type of 'index_names'
|
138 |
+
(
|
139 |
+
self.columns,
|
140 |
+
self.index_names,
|
141 |
+
self.col_names,
|
142 |
+
_,
|
143 |
+
) = self._extract_multi_indexer_columns(
|
144 |
+
columns,
|
145 |
+
self.index_names, # type: ignore[has-type]
|
146 |
+
)
|
147 |
+
|
148 |
+
# get popped off for index
|
149 |
+
self.orig_names: list[Hashable] = list(self.columns)
|
150 |
+
|
151 |
+
# needs to be cleaned/refactored
|
152 |
+
# multiple date column thing turning into a real spaghetti factory
|
153 |
+
|
154 |
+
if not self._has_complex_date_col:
|
155 |
+
(index_names, self.orig_names, self.columns) = self._get_index_name()
|
156 |
+
self._name_processed = True
|
157 |
+
if self.index_names is None:
|
158 |
+
self.index_names = index_names
|
159 |
+
|
160 |
+
if self._col_indices is None:
|
161 |
+
self._col_indices = list(range(len(self.columns)))
|
162 |
+
|
163 |
+
self._parse_date_cols = self._validate_parse_dates_presence(self.columns)
|
164 |
+
self._no_thousands_columns = self._set_no_thousand_columns()
|
165 |
+
|
166 |
+
if len(self.decimal) != 1:
|
167 |
+
raise ValueError("Only length-1 decimal markers supported")
|
168 |
+
|
169 |
+
@cache_readonly
|
170 |
+
def num(self) -> re.Pattern:
|
171 |
+
decimal = re.escape(self.decimal)
|
172 |
+
if self.thousands is None:
|
173 |
+
regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
|
174 |
+
else:
|
175 |
+
thousands = re.escape(self.thousands)
|
176 |
+
regex = (
|
177 |
+
rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
|
178 |
+
rf"([0-9]?(E|e)\-?[0-9]+)?$"
|
179 |
+
)
|
180 |
+
return re.compile(regex)
|
181 |
+
|
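The num pattern built above is what decides whether a field looks numeric before the thousands/decimal replacement steps further down are allowed to touch it. The following standalone sketch rebuilds the same pattern with decimal='.' and thousands=',' purely for illustration (it is not imported from pandas):

import re

decimal = re.escape(".")
thousands = re.escape(",")
# Same shape as PythonParser.num: optional sign, digit groups with optional
# thousands separators, optional decimal part, optional exponent.
num = re.compile(
    rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
    r"([0-9]?(E|e)\-?[0-9]+)?$"
)

for field in ["1,234.5", "-7", "12e-3", "1,2,3", "abc"]:
    print(field, bool(num.search(field)))
# 1,234.5 True / -7 True / 12e-3 True / 1,2,3 True (the grouping is permissive) / abc False
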
182 |
+
def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]):
|
183 |
+
sep = self.delimiter
|
184 |
+
|
185 |
+
if sep is None or len(sep) == 1:
|
186 |
+
if self.lineterminator:
|
187 |
+
raise ValueError(
|
188 |
+
"Custom line terminators not supported in python parser (yet)"
|
189 |
+
)
|
190 |
+
|
191 |
+
class MyDialect(csv.Dialect):
|
192 |
+
delimiter = self.delimiter
|
193 |
+
quotechar = self.quotechar
|
194 |
+
escapechar = self.escapechar
|
195 |
+
doublequote = self.doublequote
|
196 |
+
skipinitialspace = self.skipinitialspace
|
197 |
+
quoting = self.quoting
|
198 |
+
lineterminator = "\n"
|
199 |
+
|
200 |
+
dia = MyDialect
|
201 |
+
|
202 |
+
if sep is not None:
|
203 |
+
dia.delimiter = sep
|
204 |
+
else:
|
205 |
+
# attempt to sniff the delimiter from the first valid line,
|
206 |
+
# i.e. no comment line and not in skiprows
|
207 |
+
line = f.readline()
|
208 |
+
lines = self._check_comments([[line]])[0]
|
209 |
+
while self.skipfunc(self.pos) or not lines:
|
210 |
+
self.pos += 1
|
211 |
+
line = f.readline()
|
212 |
+
lines = self._check_comments([[line]])[0]
|
213 |
+
lines_str = cast(list[str], lines)
|
214 |
+
|
215 |
+
# since `line` was a string, lines will be a list containing
|
216 |
+
# only a single string
|
217 |
+
line = lines_str[0]
|
218 |
+
|
219 |
+
self.pos += 1
|
220 |
+
self.line_pos += 1
|
221 |
+
sniffed = csv.Sniffer().sniff(line)
|
222 |
+
dia.delimiter = sniffed.delimiter
|
223 |
+
|
224 |
+
# Note: encoding is irrelevant here
|
225 |
+
line_rdr = csv.reader(StringIO(line), dialect=dia)
|
226 |
+
self.buf.extend(list(line_rdr))
|
227 |
+
|
228 |
+
# Note: encoding is irrelevant here
|
229 |
+
reader = csv.reader(f, dialect=dia, strict=True)
|
230 |
+
|
231 |
+
else:
|
232 |
+
|
233 |
+
def _read():
|
234 |
+
line = f.readline()
|
235 |
+
pat = re.compile(sep)
|
236 |
+
|
237 |
+
yield pat.split(line.strip())
|
238 |
+
|
239 |
+
for line in f:
|
240 |
+
yield pat.split(line.strip())
|
241 |
+
|
242 |
+
reader = _read()
|
243 |
+
|
244 |
+
return reader
|
245 |
+
|
246 |
+
def read(
|
247 |
+
self, rows: int | None = None
|
248 |
+
) -> tuple[
|
249 |
+
Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
|
250 |
+
]:
|
251 |
+
try:
|
252 |
+
content = self._get_lines(rows)
|
253 |
+
except StopIteration:
|
254 |
+
if self._first_chunk:
|
255 |
+
content = []
|
256 |
+
else:
|
257 |
+
self.close()
|
258 |
+
raise
|
259 |
+
|
260 |
+
# done with first read, next time raise StopIteration
|
261 |
+
self._first_chunk = False
|
262 |
+
|
263 |
+
columns: Sequence[Hashable] = list(self.orig_names)
|
264 |
+
if not len(content): # pragma: no cover
|
265 |
+
# DataFrame with the right metadata, even though it's length 0
|
266 |
+
# error: Cannot determine type of 'index_col'
|
267 |
+
names = dedup_names(
|
268 |
+
self.orig_names,
|
269 |
+
is_potential_multi_index(
|
270 |
+
self.orig_names,
|
271 |
+
self.index_col, # type: ignore[has-type]
|
272 |
+
),
|
273 |
+
)
|
274 |
+
index, columns, col_dict = self._get_empty_meta(
|
275 |
+
names,
|
276 |
+
self.dtype,
|
277 |
+
)
|
278 |
+
conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)
|
279 |
+
return index, conv_columns, col_dict
|
280 |
+
|
281 |
+
# handle new style for names in index
|
282 |
+
count_empty_content_vals = count_empty_vals(content[0])
|
283 |
+
indexnamerow = None
|
284 |
+
if self.has_index_names and count_empty_content_vals == len(columns):
|
285 |
+
indexnamerow = content[0]
|
286 |
+
content = content[1:]
|
287 |
+
|
288 |
+
alldata = self._rows_to_cols(content)
|
289 |
+
data, columns = self._exclude_implicit_index(alldata)
|
290 |
+
|
291 |
+
conv_data = self._convert_data(data)
|
292 |
+
columns, conv_data = self._do_date_conversions(columns, conv_data)
|
293 |
+
|
294 |
+
index, result_columns = self._make_index(
|
295 |
+
conv_data, alldata, columns, indexnamerow
|
296 |
+
)
|
297 |
+
|
298 |
+
return index, result_columns, conv_data
|
299 |
+
|
300 |
+
def _exclude_implicit_index(
|
301 |
+
self,
|
302 |
+
alldata: list[np.ndarray],
|
303 |
+
) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:
|
304 |
+
# error: Cannot determine type of 'index_col'
|
305 |
+
names = dedup_names(
|
306 |
+
self.orig_names,
|
307 |
+
is_potential_multi_index(
|
308 |
+
self.orig_names,
|
309 |
+
self.index_col, # type: ignore[has-type]
|
310 |
+
),
|
311 |
+
)
|
312 |
+
|
313 |
+
offset = 0
|
314 |
+
if self._implicit_index:
|
315 |
+
# error: Cannot determine type of 'index_col'
|
316 |
+
offset = len(self.index_col) # type: ignore[has-type]
|
317 |
+
|
318 |
+
len_alldata = len(alldata)
|
319 |
+
self._check_data_length(names, alldata)
|
320 |
+
|
321 |
+
return {
|
322 |
+
name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata
|
323 |
+
}, names
|
324 |
+
|
325 |
+
# legacy
|
326 |
+
def get_chunk(
|
327 |
+
self, size: int | None = None
|
328 |
+
) -> tuple[
|
329 |
+
Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
|
330 |
+
]:
|
331 |
+
if size is None:
|
332 |
+
# error: "PythonParser" has no attribute "chunksize"
|
333 |
+
size = self.chunksize # type: ignore[attr-defined]
|
334 |
+
return self.read(rows=size)
|
335 |
+
|
336 |
+
def _convert_data(
|
337 |
+
self,
|
338 |
+
data: Mapping[Hashable, np.ndarray],
|
339 |
+
) -> Mapping[Hashable, ArrayLike]:
|
340 |
+
# apply converters
|
341 |
+
clean_conv = self._clean_mapping(self.converters)
|
342 |
+
clean_dtypes = self._clean_mapping(self.dtype)
|
343 |
+
|
344 |
+
# Apply NA values.
|
345 |
+
clean_na_values = {}
|
346 |
+
clean_na_fvalues = {}
|
347 |
+
|
348 |
+
if isinstance(self.na_values, dict):
|
349 |
+
for col in self.na_values:
|
350 |
+
na_value = self.na_values[col]
|
351 |
+
na_fvalue = self.na_fvalues[col]
|
352 |
+
|
353 |
+
if isinstance(col, int) and col not in self.orig_names:
|
354 |
+
col = self.orig_names[col]
|
355 |
+
|
356 |
+
clean_na_values[col] = na_value
|
357 |
+
clean_na_fvalues[col] = na_fvalue
|
358 |
+
else:
|
359 |
+
clean_na_values = self.na_values
|
360 |
+
clean_na_fvalues = self.na_fvalues
|
361 |
+
|
362 |
+
return self._convert_to_ndarrays(
|
363 |
+
data,
|
364 |
+
clean_na_values,
|
365 |
+
clean_na_fvalues,
|
366 |
+
self.verbose,
|
367 |
+
clean_conv,
|
368 |
+
clean_dtypes,
|
369 |
+
)
|
370 |
+
|
371 |
+
@cache_readonly
|
372 |
+
def _have_mi_columns(self) -> bool:
|
373 |
+
if self.header is None:
|
374 |
+
return False
|
375 |
+
|
376 |
+
header = self.header
|
377 |
+
if isinstance(header, (list, tuple, np.ndarray)):
|
378 |
+
return len(header) > 1
|
379 |
+
else:
|
380 |
+
return False
|
381 |
+
|
382 |
+
def _infer_columns(
|
383 |
+
self,
|
384 |
+
) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:
|
385 |
+
names = self.names
|
386 |
+
num_original_columns = 0
|
387 |
+
clear_buffer = True
|
388 |
+
unnamed_cols: set[Scalar | None] = set()
|
389 |
+
|
390 |
+
if self.header is not None:
|
391 |
+
header = self.header
|
392 |
+
have_mi_columns = self._have_mi_columns
|
393 |
+
|
394 |
+
if isinstance(header, (list, tuple, np.ndarray)):
|
395 |
+
# we have a mi columns, so read an extra line
|
396 |
+
if have_mi_columns:
|
397 |
+
header = list(header) + [header[-1] + 1]
|
398 |
+
else:
|
399 |
+
header = [header]
|
400 |
+
|
401 |
+
columns: list[list[Scalar | None]] = []
|
402 |
+
for level, hr in enumerate(header):
|
403 |
+
try:
|
404 |
+
line = self._buffered_line()
|
405 |
+
|
406 |
+
while self.line_pos <= hr:
|
407 |
+
line = self._next_line()
|
408 |
+
|
409 |
+
except StopIteration as err:
|
410 |
+
if 0 < self.line_pos <= hr and (
|
411 |
+
not have_mi_columns or hr != header[-1]
|
412 |
+
):
|
413 |
+
# If no rows we want to raise a different message and if
|
414 |
+
# we have mi columns, the last line is not part of the header
|
415 |
+
joi = list(map(str, header[:-1] if have_mi_columns else header))
|
416 |
+
msg = f"[{','.join(joi)}], len of {len(joi)}, "
|
417 |
+
raise ValueError(
|
418 |
+
f"Passed header={msg}"
|
419 |
+
f"but only {self.line_pos} lines in file"
|
420 |
+
) from err
|
421 |
+
|
422 |
+
# We have an empty file, so check
|
423 |
+
# if columns are provided. That will
|
424 |
+
# serve as the 'line' for parsing
|
425 |
+
if have_mi_columns and hr > 0:
|
426 |
+
if clear_buffer:
|
427 |
+
self._clear_buffer()
|
428 |
+
columns.append([None] * len(columns[-1]))
|
429 |
+
return columns, num_original_columns, unnamed_cols
|
430 |
+
|
431 |
+
if not self.names:
|
432 |
+
raise EmptyDataError("No columns to parse from file") from err
|
433 |
+
|
434 |
+
line = self.names[:]
|
435 |
+
|
436 |
+
this_columns: list[Scalar | None] = []
|
437 |
+
this_unnamed_cols = []
|
438 |
+
|
439 |
+
for i, c in enumerate(line):
|
440 |
+
if c == "":
|
441 |
+
if have_mi_columns:
|
442 |
+
col_name = f"Unnamed: {i}_level_{level}"
|
443 |
+
else:
|
444 |
+
col_name = f"Unnamed: {i}"
|
445 |
+
|
446 |
+
this_unnamed_cols.append(i)
|
447 |
+
this_columns.append(col_name)
|
448 |
+
else:
|
449 |
+
this_columns.append(c)
|
450 |
+
|
451 |
+
if not have_mi_columns:
|
452 |
+
counts: DefaultDict = defaultdict(int)
|
453 |
+
# Ensure that regular columns are used before unnamed ones
|
454 |
+
# to keep given names and mangle unnamed columns
|
455 |
+
col_loop_order = [
|
456 |
+
i
|
457 |
+
for i in range(len(this_columns))
|
458 |
+
if i not in this_unnamed_cols
|
459 |
+
] + this_unnamed_cols
|
460 |
+
|
461 |
+
# TODO: Use pandas.io.common.dedup_names instead (see #50371)
|
462 |
+
for i in col_loop_order:
|
463 |
+
col = this_columns[i]
|
464 |
+
old_col = col
|
465 |
+
cur_count = counts[col]
|
466 |
+
|
467 |
+
if cur_count > 0:
|
468 |
+
while cur_count > 0:
|
469 |
+
counts[old_col] = cur_count + 1
|
470 |
+
col = f"{old_col}.{cur_count}"
|
471 |
+
if col in this_columns:
|
472 |
+
cur_count += 1
|
473 |
+
else:
|
474 |
+
cur_count = counts[col]
|
475 |
+
|
476 |
+
if (
|
477 |
+
self.dtype is not None
|
478 |
+
and is_dict_like(self.dtype)
|
479 |
+
and self.dtype.get(old_col) is not None
|
480 |
+
and self.dtype.get(col) is None
|
481 |
+
):
|
482 |
+
self.dtype.update({col: self.dtype.get(old_col)})
|
483 |
+
this_columns[i] = col
|
484 |
+
counts[col] = cur_count + 1
|
485 |
+
elif have_mi_columns:
|
486 |
+
# if we have grabbed an extra line, but its not in our
|
487 |
+
# format so save in the buffer, and create an blank extra
|
488 |
+
# line for the rest of the parsing code
|
489 |
+
if hr == header[-1]:
|
490 |
+
lc = len(this_columns)
|
491 |
+
# error: Cannot determine type of 'index_col'
|
492 |
+
sic = self.index_col # type: ignore[has-type]
|
493 |
+
ic = len(sic) if sic is not None else 0
|
494 |
+
unnamed_count = len(this_unnamed_cols)
|
495 |
+
|
496 |
+
# if wrong number of blanks or no index, not our format
|
497 |
+
if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:
|
498 |
+
clear_buffer = False
|
499 |
+
this_columns = [None] * lc
|
500 |
+
self.buf = [self.buf[-1]]
|
501 |
+
|
502 |
+
columns.append(this_columns)
|
503 |
+
unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
|
504 |
+
|
505 |
+
if len(columns) == 1:
|
506 |
+
num_original_columns = len(this_columns)
|
507 |
+
|
508 |
+
if clear_buffer:
|
509 |
+
self._clear_buffer()
|
510 |
+
|
511 |
+
first_line: list[Scalar] | None
|
512 |
+
if names is not None:
|
513 |
+
# Read first row after header to check if data are longer
|
514 |
+
try:
|
515 |
+
first_line = self._next_line()
|
516 |
+
except StopIteration:
|
517 |
+
first_line = None
|
518 |
+
|
519 |
+
len_first_data_row = 0 if first_line is None else len(first_line)
|
520 |
+
|
521 |
+
if len(names) > len(columns[0]) and len(names) > len_first_data_row:
|
522 |
+
raise ValueError(
|
523 |
+
"Number of passed names did not match "
|
524 |
+
"number of header fields in the file"
|
525 |
+
)
|
526 |
+
if len(columns) > 1:
|
527 |
+
raise TypeError("Cannot pass names with multi-index columns")
|
528 |
+
|
529 |
+
if self.usecols is not None:
|
530 |
+
# Set _use_cols. We don't store columns because they are
|
531 |
+
# overwritten.
|
532 |
+
self._handle_usecols(columns, names, num_original_columns)
|
533 |
+
else:
|
534 |
+
num_original_columns = len(names)
|
535 |
+
if self._col_indices is not None and len(names) != len(
|
536 |
+
self._col_indices
|
537 |
+
):
|
538 |
+
columns = [[names[i] for i in sorted(self._col_indices)]]
|
539 |
+
else:
|
540 |
+
columns = [names]
|
541 |
+
else:
|
542 |
+
columns = self._handle_usecols(
|
543 |
+
columns, columns[0], num_original_columns
|
544 |
+
)
|
545 |
+
else:
|
546 |
+
ncols = len(self._header_line)
|
547 |
+
num_original_columns = ncols
|
548 |
+
|
549 |
+
if not names:
|
550 |
+
columns = [list(range(ncols))]
|
551 |
+
columns = self._handle_usecols(columns, columns[0], ncols)
|
552 |
+
elif self.usecols is None or len(names) >= ncols:
|
553 |
+
columns = self._handle_usecols([names], names, ncols)
|
554 |
+
num_original_columns = len(names)
|
555 |
+
elif not callable(self.usecols) and len(names) != len(self.usecols):
|
556 |
+
raise ValueError(
|
557 |
+
"Number of passed names did not match number of "
|
558 |
+
"header fields in the file"
|
559 |
+
)
|
560 |
+
else:
|
561 |
+
# Ignore output but set used columns.
|
562 |
+
columns = [names]
|
563 |
+
self._handle_usecols(columns, columns[0], ncols)
|
564 |
+
|
565 |
+
return columns, num_original_columns, unnamed_cols
|
566 |
+
|
567 |
+
@cache_readonly
|
568 |
+
def _header_line(self):
|
569 |
+
# Store line for reuse in _get_index_name
|
570 |
+
if self.header is not None:
|
571 |
+
return None
|
572 |
+
|
573 |
+
try:
|
574 |
+
line = self._buffered_line()
|
575 |
+
except StopIteration as err:
|
576 |
+
if not self.names:
|
577 |
+
raise EmptyDataError("No columns to parse from file") from err
|
578 |
+
|
579 |
+
line = self.names[:]
|
580 |
+
return line
|
581 |
+
|
582 |
+
def _handle_usecols(
|
583 |
+
self,
|
584 |
+
columns: list[list[Scalar | None]],
|
585 |
+
usecols_key: list[Scalar | None],
|
586 |
+
num_original_columns: int,
|
587 |
+
) -> list[list[Scalar | None]]:
|
588 |
+
"""
|
589 |
+
Sets self._col_indices
|
590 |
+
|
591 |
+
usecols_key is used if there are string usecols.
|
592 |
+
"""
|
593 |
+
col_indices: set[int] | list[int]
|
594 |
+
if self.usecols is not None:
|
595 |
+
if callable(self.usecols):
|
596 |
+
col_indices = self._evaluate_usecols(self.usecols, usecols_key)
|
597 |
+
elif any(isinstance(u, str) for u in self.usecols):
|
598 |
+
if len(columns) > 1:
|
599 |
+
raise ValueError(
|
600 |
+
"If using multiple headers, usecols must be integers."
|
601 |
+
)
|
602 |
+
col_indices = []
|
603 |
+
|
604 |
+
for col in self.usecols:
|
605 |
+
if isinstance(col, str):
|
606 |
+
try:
|
607 |
+
col_indices.append(usecols_key.index(col))
|
608 |
+
except ValueError:
|
609 |
+
self._validate_usecols_names(self.usecols, usecols_key)
|
610 |
+
else:
|
611 |
+
col_indices.append(col)
|
612 |
+
else:
|
613 |
+
missing_usecols = [
|
614 |
+
col for col in self.usecols if col >= num_original_columns
|
615 |
+
]
|
616 |
+
if missing_usecols:
|
617 |
+
raise ParserError(
|
618 |
+
"Defining usecols with out-of-bounds indices is not allowed. "
|
619 |
+
f"{missing_usecols} are out-of-bounds.",
|
620 |
+
)
|
621 |
+
col_indices = self.usecols
|
622 |
+
|
623 |
+
columns = [
|
624 |
+
[n for i, n in enumerate(column) if i in col_indices]
|
625 |
+
for column in columns
|
626 |
+
]
|
627 |
+
self._col_indices = sorted(col_indices)
|
628 |
+
return columns
|
629 |
+
|
630 |
+
def _buffered_line(self) -> list[Scalar]:
|
631 |
+
"""
|
632 |
+
Return a line from buffer, filling buffer if required.
|
633 |
+
"""
|
634 |
+
if len(self.buf) > 0:
|
635 |
+
return self.buf[0]
|
636 |
+
else:
|
637 |
+
return self._next_line()
|
638 |
+
|
639 |
+
def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
|
640 |
+
"""
|
641 |
+
Checks whether the file begins with the BOM character.
|
642 |
+
If it does, remove it. In addition, if there is quoting
|
643 |
+
in the field subsequent to the BOM, remove it as well
|
644 |
+
because it technically takes place at the beginning of
|
645 |
+
the name, not the middle of it.
|
646 |
+
"""
|
647 |
+
# first_row will be a list, so we need to check
|
648 |
+
# that that list is not empty before proceeding.
|
649 |
+
if not first_row:
|
650 |
+
return first_row
|
651 |
+
|
652 |
+
# The first element of this row is the one that could have the
|
653 |
+
# BOM that we want to remove. Check that the first element is a
|
654 |
+
# string before proceeding.
|
655 |
+
if not isinstance(first_row[0], str):
|
656 |
+
return first_row
|
657 |
+
|
658 |
+
# Check that the string is not empty, as that would
|
659 |
+
# obviously not have a BOM at the start of it.
|
660 |
+
if not first_row[0]:
|
661 |
+
return first_row
|
662 |
+
|
663 |
+
# Since the string is non-empty, check that it does
|
664 |
+
# in fact begin with a BOM.
|
665 |
+
first_elt = first_row[0][0]
|
666 |
+
if first_elt != _BOM:
|
667 |
+
return first_row
|
668 |
+
|
669 |
+
first_row_bom = first_row[0]
|
670 |
+
new_row: str
|
671 |
+
|
672 |
+
if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
|
673 |
+
start = 2
|
674 |
+
quote = first_row_bom[1]
|
675 |
+
end = first_row_bom[2:].index(quote) + 2
|
676 |
+
|
677 |
+
# Extract the data between the quotation marks
|
678 |
+
new_row = first_row_bom[start:end]
|
679 |
+
|
680 |
+
# Extract any remaining data after the second
|
681 |
+
# quotation mark.
|
682 |
+
if len(first_row_bom) > end + 1:
|
683 |
+
new_row += first_row_bom[end + 1 :]
|
684 |
+
|
685 |
+
else:
|
686 |
+
# No quotation so just remove BOM from first element
|
687 |
+
new_row = first_row_bom[1:]
|
688 |
+
|
689 |
+
new_row_list: list[Scalar] = [new_row]
|
690 |
+
return new_row_list + first_row[1:]
|
691 |
+
|
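The BOM stripping above is why a UTF-8-with-BOM file does not end up with '\ufeff' glued onto the first column name when the python engine is used. A quick illustration with an in-memory buffer; the behaviour is sketched from the code above, and the sample data is made up.

from io import StringIO
import pandas as pd

# A CSV whose very first character is the byte order mark.
raw = "\ufeffname,value\nalpha,1\nbeta,2\n"

df = pd.read_csv(StringIO(raw), engine="python")
print(df.columns.tolist())  # ['name', 'value'] -- BOM removed, not '\ufeffname'
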
692 |
+
def _is_line_empty(self, line: list[Scalar]) -> bool:
|
693 |
+
"""
|
694 |
+
Check if a line is empty or not.
|
695 |
+
|
696 |
+
Parameters
|
697 |
+
----------
|
698 |
+
line : str, array-like
|
699 |
+
The line of data to check.
|
700 |
+
|
701 |
+
Returns
|
702 |
+
-------
|
703 |
+
boolean : Whether or not the line is empty.
|
704 |
+
"""
|
705 |
+
return not line or all(not x for x in line)
|
706 |
+
|
707 |
+
def _next_line(self) -> list[Scalar]:
|
708 |
+
if isinstance(self.data, list):
|
709 |
+
while self.skipfunc(self.pos):
|
710 |
+
if self.pos >= len(self.data):
|
711 |
+
break
|
712 |
+
self.pos += 1
|
713 |
+
|
714 |
+
while True:
|
715 |
+
try:
|
716 |
+
line = self._check_comments([self.data[self.pos]])[0]
|
717 |
+
self.pos += 1
|
718 |
+
# either uncommented or blank to begin with
|
719 |
+
if not self.skip_blank_lines and (
|
720 |
+
self._is_line_empty(self.data[self.pos - 1]) or line
|
721 |
+
):
|
722 |
+
break
|
723 |
+
if self.skip_blank_lines:
|
724 |
+
ret = self._remove_empty_lines([line])
|
725 |
+
if ret:
|
726 |
+
line = ret[0]
|
727 |
+
break
|
728 |
+
except IndexError:
|
729 |
+
raise StopIteration
|
730 |
+
else:
|
731 |
+
while self.skipfunc(self.pos):
|
732 |
+
self.pos += 1
|
733 |
+
# assert for mypy, data is Iterator[str] or None, would error in next
|
734 |
+
assert self.data is not None
|
735 |
+
next(self.data)
|
736 |
+
|
737 |
+
while True:
|
738 |
+
orig_line = self._next_iter_line(row_num=self.pos + 1)
|
739 |
+
self.pos += 1
|
740 |
+
|
741 |
+
if orig_line is not None:
|
742 |
+
line = self._check_comments([orig_line])[0]
|
743 |
+
|
744 |
+
if self.skip_blank_lines:
|
745 |
+
ret = self._remove_empty_lines([line])
|
746 |
+
|
747 |
+
if ret:
|
748 |
+
line = ret[0]
|
749 |
+
break
|
750 |
+
elif self._is_line_empty(orig_line) or line:
|
751 |
+
break
|
752 |
+
|
753 |
+
# This was the first line of the file,
|
754 |
+
# which could contain the BOM at the
|
755 |
+
# beginning of it.
|
756 |
+
if self.pos == 1:
|
757 |
+
line = self._check_for_bom(line)
|
758 |
+
|
759 |
+
self.line_pos += 1
|
760 |
+
self.buf.append(line)
|
761 |
+
return line
|
762 |
+
|
763 |
+
def _alert_malformed(self, msg: str, row_num: int) -> None:
|
764 |
+
"""
|
765 |
+
Alert a user about a malformed row, depending on value of
|
766 |
+
`self.on_bad_lines` enum.
|
767 |
+
|
768 |
+
If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.
|
769 |
+
If `self.on_bad_lines` is WARN, a `ParserWarning` is emitted instead.
|
770 |
+
|
771 |
+
Parameters
|
772 |
+
----------
|
773 |
+
msg: str
|
774 |
+
The error message to display.
|
775 |
+
row_num: int
|
776 |
+
The row number where the parsing error occurred.
|
777 |
+
Because this row number is displayed, we 1-index,
|
778 |
+
even though we 0-index internally.
|
779 |
+
"""
|
780 |
+
if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
|
781 |
+
raise ParserError(msg)
|
782 |
+
if self.on_bad_lines == self.BadLineHandleMethod.WARN:
|
783 |
+
warnings.warn(
|
784 |
+
f"Skipping line {row_num}: {msg}\n",
|
785 |
+
ParserWarning,
|
786 |
+
stacklevel=find_stack_level(),
|
787 |
+
)
|
788 |
+
|
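_alert_malformed is the choke point for the public on_bad_lines option in this engine: 'error' raises ParserError, 'warn' emits a ParserWarning and skips the offending row, and a callable (handled in _rows_to_cols below) can repair the row instead. A small sketch of the warn path, with invented data:

from io import StringIO
import warnings
import pandas as pd

# The second data row has one field too many.
raw = "a,b\n1,2\n3,4,5\n6,7\n"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df = pd.read_csv(StringIO(raw), engine="python", on_bad_lines="warn")

print(df.to_dict("list"))  # {'a': [1, 6], 'b': [2, 7]} -- bad row skipped
print(caught[0].category)  # <class 'pandas.errors.ParserWarning'>
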
789 |
+
def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
|
790 |
+
"""
|
791 |
+
Wrapper around iterating through `self.data` (CSV source).
|
792 |
+
|
793 |
+
When a CSV error is raised, we check for specific
|
794 |
+
error messages that allow us to customize the
|
795 |
+
error message displayed to the user.
|
796 |
+
|
797 |
+
Parameters
|
798 |
+
----------
|
799 |
+
row_num: int
|
800 |
+
The row number of the line being parsed.
|
801 |
+
"""
|
802 |
+
try:
|
803 |
+
# assert for mypy, data is Iterator[str] or None, would error in next
|
804 |
+
assert self.data is not None
|
805 |
+
line = next(self.data)
|
806 |
+
# for mypy
|
807 |
+
assert isinstance(line, list)
|
808 |
+
return line
|
809 |
+
except csv.Error as e:
|
810 |
+
if self.on_bad_lines in (
|
811 |
+
self.BadLineHandleMethod.ERROR,
|
812 |
+
self.BadLineHandleMethod.WARN,
|
813 |
+
):
|
814 |
+
msg = str(e)
|
815 |
+
|
816 |
+
if "NULL byte" in msg or "line contains NUL" in msg:
|
817 |
+
msg = (
|
818 |
+
"NULL byte detected. This byte "
|
819 |
+
"cannot be processed in Python's "
|
820 |
+
"native csv library at the moment, "
|
821 |
+
"so please pass in engine='c' instead"
|
822 |
+
)
|
823 |
+
|
824 |
+
if self.skipfooter > 0:
|
825 |
+
reason = (
|
826 |
+
"Error could possibly be due to "
|
827 |
+
"parsing errors in the skipped footer rows "
|
828 |
+
"(the skipfooter keyword is only applied "
|
829 |
+
"after Python's csv library has parsed "
|
830 |
+
"all rows)."
|
831 |
+
)
|
832 |
+
msg += ". " + reason
|
833 |
+
|
834 |
+
self._alert_malformed(msg, row_num)
|
835 |
+
return None
|
836 |
+
|
837 |
+
def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
|
838 |
+
if self.comment is None:
|
839 |
+
return lines
|
840 |
+
ret = []
|
841 |
+
for line in lines:
|
842 |
+
rl = []
|
843 |
+
for x in line:
|
844 |
+
if (
|
845 |
+
not isinstance(x, str)
|
846 |
+
or self.comment not in x
|
847 |
+
or x in self.na_values
|
848 |
+
):
|
849 |
+
rl.append(x)
|
850 |
+
else:
|
851 |
+
x = x[: x.find(self.comment)]
|
852 |
+
if len(x) > 0:
|
853 |
+
rl.append(x)
|
854 |
+
break
|
855 |
+
ret.append(rl)
|
856 |
+
return ret
|
857 |
+
|
858 |
+
def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
|
859 |
+
"""
|
860 |
+
Iterate through the lines and remove any that are
|
861 |
+
either empty or contain only one whitespace value
|
862 |
+
|
863 |
+
Parameters
|
864 |
+
----------
|
865 |
+
lines : list of list of Scalars
|
866 |
+
The array of lines that we are to filter.
|
867 |
+
|
868 |
+
Returns
|
869 |
+
-------
|
870 |
+
filtered_lines : list of list of Scalars
|
871 |
+
The same array of lines with the "empty" ones removed.
|
872 |
+
"""
|
873 |
+
# Remove empty lines and lines with only one whitespace value
|
874 |
+
ret = [
|
875 |
+
line
|
876 |
+
for line in lines
|
877 |
+
if (
|
878 |
+
len(line) > 1
|
879 |
+
or len(line) == 1
|
880 |
+
and (not isinstance(line[0], str) or line[0].strip())
|
881 |
+
)
|
882 |
+
]
|
883 |
+
return ret
|
884 |
+
|
885 |
+
def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
|
886 |
+
if self.thousands is None:
|
887 |
+
return lines
|
888 |
+
|
889 |
+
return self._search_replace_num_columns(
|
890 |
+
lines=lines, search=self.thousands, replace=""
|
891 |
+
)
|
892 |
+
|
893 |
+
def _search_replace_num_columns(
|
894 |
+
self, lines: list[list[Scalar]], search: str, replace: str
|
895 |
+
) -> list[list[Scalar]]:
|
896 |
+
ret = []
|
897 |
+
for line in lines:
|
898 |
+
rl = []
|
899 |
+
for i, x in enumerate(line):
|
900 |
+
if (
|
901 |
+
not isinstance(x, str)
|
902 |
+
or search not in x
|
903 |
+
or i in self._no_thousands_columns
|
904 |
+
or not self.num.search(x.strip())
|
905 |
+
):
|
906 |
+
rl.append(x)
|
907 |
+
else:
|
908 |
+
rl.append(x.replace(search, replace))
|
909 |
+
ret.append(rl)
|
910 |
+
return ret
|
911 |
+
|
912 |
+
def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
|
913 |
+
if self.decimal == parser_defaults["decimal"]:
|
914 |
+
return lines
|
915 |
+
|
916 |
+
return self._search_replace_num_columns(
|
917 |
+
lines=lines, search=self.decimal, replace="."
|
918 |
+
)
|
919 |
+
|
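_check_thousands and _check_decimal above are the python-engine implementation of the public thousands= and decimal= options: separators are stripped (and a non-default decimal mark mapped to '.') only in columns that pass the num regex screen and are not in the no-thousands set. A short usage sketch with European-style numbers; the figures are invented for illustration.

from io import StringIO
import pandas as pd

# '.' as thousands separator, ',' as decimal mark.
raw = "city;population;area\nBerlin;3.850.809;891,7\nHamburg;1.945.532;755,2\n"

df = pd.read_csv(
    StringIO(raw),
    sep=";",
    engine="python",
    thousands=".",
    decimal=",",
)
print(df["population"].tolist())  # [3850809, 1945532]
print(df["area"].tolist())        # [891.7, 755.2]
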
920 |
+
def _clear_buffer(self) -> None:
|
921 |
+
self.buf = []
|
922 |
+
|
923 |
+
def _get_index_name(
|
924 |
+
self,
|
925 |
+
) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:
|
926 |
+
"""
|
927 |
+
Try several cases to get lines:
|
928 |
+
|
929 |
+
0) There are headers on row 0 and row 1 and their
|
930 |
+
total summed lengths equals the length of the next line.
|
931 |
+
Treat row 0 as columns and row 1 as indices
|
932 |
+
1) Look for implicit index: there are more columns
|
933 |
+
on row 1 than row 0. If this is true, assume that row
|
934 |
+
1 lists index columns and row 0 lists normal columns.
|
935 |
+
2) Get index from the columns if it was listed.
|
936 |
+
"""
|
937 |
+
columns: Sequence[Hashable] = self.orig_names
|
938 |
+
orig_names = list(columns)
|
939 |
+
columns = list(columns)
|
940 |
+
|
941 |
+
line: list[Scalar] | None
|
942 |
+
if self._header_line is not None:
|
943 |
+
line = self._header_line
|
944 |
+
else:
|
945 |
+
try:
|
946 |
+
line = self._next_line()
|
947 |
+
except StopIteration:
|
948 |
+
line = None
|
949 |
+
|
950 |
+
next_line: list[Scalar] | None
|
951 |
+
try:
|
952 |
+
next_line = self._next_line()
|
953 |
+
except StopIteration:
|
954 |
+
next_line = None
|
955 |
+
|
956 |
+
# implicitly index_col=0 b/c 1 fewer column names
|
957 |
+
implicit_first_cols = 0
|
958 |
+
if line is not None:
|
959 |
+
# leave it 0, #2442
|
960 |
+
# Case 1
|
961 |
+
# error: Cannot determine type of 'index_col'
|
962 |
+
index_col = self.index_col # type: ignore[has-type]
|
963 |
+
if index_col is not False:
|
964 |
+
implicit_first_cols = len(line) - self.num_original_columns
|
965 |
+
|
966 |
+
# Case 0
|
967 |
+
if (
|
968 |
+
next_line is not None
|
969 |
+
and self.header is not None
|
970 |
+
and index_col is not False
|
971 |
+
):
|
972 |
+
if len(next_line) == len(line) + self.num_original_columns:
|
973 |
+
# column and index names on diff rows
|
974 |
+
self.index_col = list(range(len(line)))
|
975 |
+
self.buf = self.buf[1:]
|
976 |
+
|
977 |
+
for c in reversed(line):
|
978 |
+
columns.insert(0, c)
|
979 |
+
|
980 |
+
# Update list of original names to include all indices.
|
981 |
+
orig_names = list(columns)
|
982 |
+
self.num_original_columns = len(columns)
|
983 |
+
return line, orig_names, columns
|
984 |
+
|
985 |
+
if implicit_first_cols > 0:
|
986 |
+
# Case 1
|
987 |
+
self._implicit_index = True
|
988 |
+
if self.index_col is None:
|
989 |
+
self.index_col = list(range(implicit_first_cols))
|
990 |
+
|
991 |
+
index_name = None
|
992 |
+
|
993 |
+
else:
|
994 |
+
# Case 2
|
995 |
+
(index_name, _, self.index_col) = self._clean_index_names(
|
996 |
+
columns, self.index_col
|
997 |
+
)
|
998 |
+
|
999 |
+
return index_name, orig_names, columns
|
1000 |
+
|
1001 |
+
def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
|
1002 |
+
col_len = self.num_original_columns
|
1003 |
+
|
1004 |
+
if self._implicit_index:
|
1005 |
+
col_len += len(self.index_col)
|
1006 |
+
|
1007 |
+
max_len = max(len(row) for row in content)
|
1008 |
+
|
1009 |
+
# Check that there are no rows with too many
|
1010 |
+
# elements in their row (rows with too few
|
1011 |
+
# elements are padded with NaN).
|
1012 |
+
# error: Non-overlapping identity check (left operand type: "List[int]",
|
1013 |
+
# right operand type: "Literal[False]")
|
1014 |
+
if (
|
1015 |
+
max_len > col_len
|
1016 |
+
and self.index_col is not False # type: ignore[comparison-overlap]
|
1017 |
+
and self.usecols is None
|
1018 |
+
):
|
1019 |
+
footers = self.skipfooter if self.skipfooter else 0
|
1020 |
+
bad_lines = []
|
1021 |
+
|
1022 |
+
iter_content = enumerate(content)
|
1023 |
+
content_len = len(content)
|
1024 |
+
content = []
|
1025 |
+
|
1026 |
+
for i, _content in iter_content:
|
1027 |
+
actual_len = len(_content)
|
1028 |
+
|
1029 |
+
if actual_len > col_len:
|
1030 |
+
if callable(self.on_bad_lines):
|
1031 |
+
new_l = self.on_bad_lines(_content)
|
1032 |
+
if new_l is not None:
|
1033 |
+
content.append(new_l)
|
1034 |
+
elif self.on_bad_lines in (
|
1035 |
+
self.BadLineHandleMethod.ERROR,
|
1036 |
+
self.BadLineHandleMethod.WARN,
|
1037 |
+
):
|
1038 |
+
row_num = self.pos - (content_len - i + footers)
|
1039 |
+
bad_lines.append((row_num, actual_len))
|
1040 |
+
|
1041 |
+
if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
|
1042 |
+
break
|
1043 |
+
else:
|
1044 |
+
content.append(_content)
|
1045 |
+
|
1046 |
+
for row_num, actual_len in bad_lines:
|
1047 |
+
msg = (
|
1048 |
+
f"Expected {col_len} fields in line {row_num + 1}, saw "
|
1049 |
+
f"{actual_len}"
|
1050 |
+
)
|
1051 |
+
if (
|
1052 |
+
self.delimiter
|
1053 |
+
and len(self.delimiter) > 1
|
1054 |
+
and self.quoting != csv.QUOTE_NONE
|
1055 |
+
):
|
1056 |
+
# see gh-13374
|
1057 |
+
                    reason = (
                        "Error could possibly be due to quotes being "
                        "ignored when a multi-char delimiter is used."
                    )
                    msg += ". " + reason

                self._alert_malformed(msg, row_num + 1)

        # see gh-13320
        zipped_content = list(lib.to_object_array(content, min_width=col_len).T)

        if self.usecols:
            assert self._col_indices is not None
            col_indices = self._col_indices

            if self._implicit_index:
                zipped_content = [
                    a
                    for i, a in enumerate(zipped_content)
                    if (
                        i < len(self.index_col)
                        or i - len(self.index_col) in col_indices
                    )
                ]
            else:
                zipped_content = [
                    a for i, a in enumerate(zipped_content) if i in col_indices
                ]
        return zipped_content

    def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
        lines = self.buf
        new_rows = None

        # already fetched some number
        if rows is not None:
            # we already have the lines in the buffer
            if len(self.buf) >= rows:
                new_rows, self.buf = self.buf[:rows], self.buf[rows:]

            # need some lines
            else:
                rows -= len(self.buf)

        if new_rows is None:
            if isinstance(self.data, list):
                if self.pos > len(self.data):
                    raise StopIteration
                if rows is None:
                    new_rows = self.data[self.pos :]
                    new_pos = len(self.data)
                else:
                    new_rows = self.data[self.pos : self.pos + rows]
                    new_pos = self.pos + rows

                new_rows = self._remove_skipped_rows(new_rows)
                lines.extend(new_rows)
                self.pos = new_pos

            else:
                new_rows = []
                try:
                    if rows is not None:
                        row_index = 0
                        row_ct = 0
                        offset = self.pos if self.pos is not None else 0
                        while row_ct < rows:
                            # assert for mypy, data is Iterator[str] or None, would
                            # error in next
                            assert self.data is not None
                            new_row = next(self.data)
                            if not self.skipfunc(offset + row_index):
                                row_ct += 1
                            row_index += 1
                            new_rows.append(new_row)

                        len_new_rows = len(new_rows)
                        new_rows = self._remove_skipped_rows(new_rows)
                        lines.extend(new_rows)
                    else:
                        rows = 0

                        while True:
                            next_row = self._next_iter_line(row_num=self.pos + rows + 1)
                            rows += 1

                            if next_row is not None:
                                new_rows.append(next_row)
                        len_new_rows = len(new_rows)

                except StopIteration:
                    len_new_rows = len(new_rows)
                    new_rows = self._remove_skipped_rows(new_rows)
                    lines.extend(new_rows)
                    if len(lines) == 0:
                        raise
                self.pos += len_new_rows

            self.buf = []
        else:
            lines = new_rows

        if self.skipfooter:
            lines = lines[: -self.skipfooter]

        lines = self._check_comments(lines)
        if self.skip_blank_lines:
            lines = self._remove_empty_lines(lines)
        lines = self._check_thousands(lines)
        return self._check_decimal(lines)

    def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
        if self.skiprows:
            return [
                row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)
            ]
        return new_rows

    def _set_no_thousand_columns(self) -> set[int]:
        no_thousands_columns: set[int] = set()
        if self.columns and self.parse_dates:
            assert self._col_indices is not None
            no_thousands_columns = self._set_noconvert_dtype_columns(
                self._col_indices, self.columns
            )
        if self.columns and self.dtype:
            assert self._col_indices is not None
            for i, col in zip(self._col_indices, self.columns):
                if not isinstance(self.dtype, dict) and not is_numeric_dtype(
                    self.dtype
                ):
                    no_thousands_columns.add(i)
                if (
                    isinstance(self.dtype, dict)
                    and col in self.dtype
                    and (
                        not is_numeric_dtype(self.dtype[col])
                        or is_bool_dtype(self.dtype[col])
                    )
                ):
                    no_thousands_columns.add(i)
        return no_thousands_columns


class FixedWidthReader(abc.Iterator):
    """
    A reader of fixed-width lines.
    """

    def __init__(
        self,
        f: IO[str] | ReadCsvBuffer[str],
        colspecs: list[tuple[int, int]] | Literal["infer"],
        delimiter: str | None,
        comment: str | None,
        skiprows: set[int] | None = None,
        infer_nrows: int = 100,
    ) -> None:
        self.f = f
        self.buffer: Iterator | None = None
        self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "
        self.comment = comment
        if colspecs == "infer":
            self.colspecs = self.detect_colspecs(
                infer_nrows=infer_nrows, skiprows=skiprows
            )
        else:
            self.colspecs = colspecs

        if not isinstance(self.colspecs, (tuple, list)):
            raise TypeError(
                "column specifications must be a list or tuple, "
                f"input was a {type(colspecs).__name__}"
            )

        for colspec in self.colspecs:
            if not (
                isinstance(colspec, (tuple, list))
                and len(colspec) == 2
                and isinstance(colspec[0], (int, np.integer, type(None)))
                and isinstance(colspec[1], (int, np.integer, type(None)))
            ):
                raise TypeError(
                    "Each column specification must be "
                    "2 element tuple or list of integers"
                )

    def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:
        """
        Read rows from self.f, skipping as specified.

        We distinguish buffer_rows (the first <= infer_nrows
        lines) from the rows returned to detect_colspecs
        because it's simpler to leave the other locations
        with skiprows logic alone than to modify them to
        deal with the fact we skipped some rows here as
        well.

        Parameters
        ----------
        infer_nrows : int
            Number of rows to read from self.f, not counting
            rows that are skipped.
        skiprows: set, optional
            Indices of rows to skip.

        Returns
        -------
        detect_rows : list of str
            A list containing the rows to read.

        """
        if skiprows is None:
            skiprows = set()
        buffer_rows = []
        detect_rows = []
        for i, row in enumerate(self.f):
            if i not in skiprows:
                detect_rows.append(row)
            buffer_rows.append(row)
            if len(detect_rows) >= infer_nrows:
                break
        self.buffer = iter(buffer_rows)
        return detect_rows

    def detect_colspecs(
        self, infer_nrows: int = 100, skiprows: set[int] | None = None
    ) -> list[tuple[int, int]]:
        # Regex escape the delimiters
        delimiters = "".join([rf"\{x}" for x in self.delimiter])
        pattern = re.compile(f"([^{delimiters}]+)")
        rows = self.get_rows(infer_nrows, skiprows)
        if not rows:
            raise EmptyDataError("No rows from which to infer column width")
        max_len = max(map(len, rows))
        mask = np.zeros(max_len + 1, dtype=int)
        if self.comment is not None:
            rows = [row.partition(self.comment)[0] for row in rows]
        for row in rows:
            for m in pattern.finditer(row):
                mask[m.start() : m.end()] = 1
        shifted = np.roll(mask, 1)
        shifted[0] = 0
        edges = np.where((mask ^ shifted) == 1)[0]
        edge_pairs = list(zip(edges[::2], edges[1::2]))
        return edge_pairs

    def __next__(self) -> list[str]:
        # Argument 1 to "next" has incompatible type "Union[IO[str],
        # ReadCsvBuffer[str]]"; expected "SupportsNext[str]"
        if self.buffer is not None:
            try:
                line = next(self.buffer)
            except StopIteration:
                self.buffer = None
                line = next(self.f)  # type: ignore[arg-type]
        else:
            line = next(self.f)  # type: ignore[arg-type]
        # Note: 'colspecs' is a sequence of half-open intervals.
        return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs]


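# Illustrative sketch (editorial addition, not part of the pandas source): the core
# idea behind ``detect_colspecs`` above, in isolation. Non-delimiter runs from a few
# sample rows are OR-ed into a 0/1 mask, and XOR against a shifted copy of the mask
# yields the positions where columns start and stop. The sample rows are made up,
# and the helper is never called by the module; run it manually if curious. It
# reuses the module-level ``re`` and ``np`` imports.
def _example_detect_colspecs_idea() -> None:
    rows = [
        "id    name      score",
        "1     alice      12.5",
        "2     bob         7.0",
    ]
    mask = np.zeros(max(map(len, rows)) + 1, dtype=int)
    for row in rows:
        for m in re.finditer(r"\S+", row):
            mask[m.start() : m.end()] = 1
    shifted = np.roll(mask, 1)
    shifted[0] = 0
    edges = np.where((mask ^ shifted) == 1)[0]
    pairs = [(int(a), int(b)) for a, b in zip(edges[::2], edges[1::2])]
    print(pairs)  # -> [(0, 2), (6, 11), (16, 21)] for the rows above

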
class FixedWidthFieldParser(PythonParser):
    """
    Specialization that Converts fixed-width fields into DataFrames.
    See PythonParser for details.
    """

    def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:
        # Support iterators, convert to a list.
        self.colspecs = kwds.pop("colspecs")
        self.infer_nrows = kwds.pop("infer_nrows")
        PythonParser.__init__(self, f, **kwds)

    def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader:
        return FixedWidthReader(
            f,
            self.colspecs,
            self.delimiter,
            self.comment,
            self.skiprows,
            self.infer_nrows,
        )

    def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
        """
        Returns the list of lines without the empty ones. With fixed-width
        fields, empty lines become arrays of empty strings.

        See PythonParser._remove_empty_lines.
        """
        return [
            line
            for line in lines
            if any(not isinstance(e, str) or e.strip() for e in line)
        ]


def count_empty_vals(vals) -> int:
    return sum(1 for v in vals if v == "" or v is None)


def _validate_skipfooter_arg(skipfooter: int) -> int:
    """
    Validate the 'skipfooter' parameter.

    Checks whether 'skipfooter' is a non-negative integer.
    Raises a ValueError if that is not the case.

    Parameters
    ----------
    skipfooter : non-negative integer
        The number of rows to skip at the end of the file.

    Returns
    -------
    validated_skipfooter : non-negative integer
        The original input if the validation succeeds.

    Raises
    ------
    ValueError : 'skipfooter' was not a non-negative integer.
    """
    if not is_integer(skipfooter):
        raise ValueError("skipfooter must be an integer")

    if skipfooter < 0:
        raise ValueError("skipfooter cannot be negative")

    # Incompatible return value type (got "Union[int, integer[Any]]", expected "int")
    return skipfooter  # type: ignore[return-value]
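# Usage sketch (editorial addition, not part of the pandas source): the classes above
# back the public ``pd.read_fwf`` entry point. The helper below is never called by the
# module; the in-memory data and column layout are hypothetical.
def _example_read_fwf_usage() -> None:
    from io import StringIO

    import pandas as pd

    data = "id  name \n1   alice\n2   bob  \n"
    # let pandas infer the (start, end) column intervals from the data itself
    inferred = pd.read_fwf(StringIO(data), colspecs="infer", infer_nrows=100)
    # or spell the half-open intervals out explicitly
    explicit = pd.read_fwf(StringIO(data), colspecs=[(0, 2), (4, 9)])
    print(inferred, explicit, sep="\n")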
env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/readers.py
ADDED
@@ -0,0 +1,2383 @@
"""
Module contains tools for processing files into DataFrames or other objects

GH#48849 provides a convenient way of deprecating keyword arguments
"""
from __future__ import annotations

from collections import (
    abc,
    defaultdict,
)
import csv
import sys
from textwrap import fill
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    NamedTuple,
    TypedDict,
    overload,
)
import warnings

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import lib
from pandas._libs.parsers import STR_NA_VALUES
from pandas.errors import (
    AbstractMethodError,
    ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend

from pandas.core.dtypes.common import (
    is_file_like,
    is_float,
    is_hashable,
    is_integer,
    is_list_like,
    pandas_dtype,
)

from pandas import Series
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import RangeIndex
from pandas.core.shared_docs import _shared_docs

from pandas.io.common import (
    IOHandles,
    get_handle,
    stringify_path,
    validate_header_arg,
)
from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper
from pandas.io.parsers.base_parser import (
    ParserBase,
    is_index_col,
    parser_defaults,
)
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
from pandas.io.parsers.python_parser import (
    FixedWidthFieldParser,
    PythonParser,
)

if TYPE_CHECKING:
    from collections.abc import (
        Hashable,
        Iterable,
        Mapping,
        Sequence,
    )
    from types import TracebackType

    from pandas._typing import (
        CompressionOptions,
        CSVEngine,
        DtypeArg,
        DtypeBackend,
        FilePath,
        IndexLabel,
        ReadCsvBuffer,
        Self,
        StorageOptions,
        UsecolsArgType,
    )
_doc_read_csv_and_table = (
    r"""
{summary}

Also supports optionally iterating or breaking of the file
into chunks.

Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.

Parameters
----------
filepath_or_buffer : str, path object or file-like object
    Any valid string path is acceptable. The string could be a URL. Valid
    URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
    expected. A local file could be: file://localhost/path/to/table.csv.

    If you want to pass in a path object, pandas accepts any ``os.PathLike``.

    By file-like object, we refer to objects with a ``read()`` method, such as
    a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
    Character or regex pattern to treat as the delimiter. If ``sep=None``, the
    C engine cannot automatically detect
    the separator, but the Python parsing engine can, meaning the latter will
    be used and automatically detect the separator from only the first valid
    row of the file by Python's builtin sniffer tool, ``csv.Sniffer``.
    In addition, separators longer than 1 character and different from
    ``'\s+'`` will be interpreted as regular expressions and will also force
    the use of the Python parsing engine. Note that regex delimiters are prone
    to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, optional
    Alias for ``sep``.
header : int, Sequence of int, 'infer' or None, default 'infer'
    Row number(s) containing column labels and marking the start of the
    data (zero-indexed). Default behavior is to infer the column names: if no ``names``
    are passed the behavior is identical to ``header=0`` and column
    names are inferred from the first line of the file, if column
    names are passed explicitly to ``names`` then the behavior is identical to
    ``header=None``. Explicitly pass ``header=0`` to be able to
    replace existing names. The header can be a list of integers that
    specify row locations for a :class:`~pandas.MultiIndex` on the columns
    e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be
    skipped (e.g. 2 in this example is skipped). Note that this
    parameter ignores commented lines and empty lines if
    ``skip_blank_lines=True``, so ``header=0`` denotes the first line of
    data rather than the first line of the file.
names : Sequence of Hashable, optional
    Sequence of column labels to apply. If the file contains a header row,
    then you should explicitly pass ``header=0`` to override the column names.
    Duplicates in this list are not allowed.
index_col : Hashable, Sequence of Hashable or False, optional
    Column(s) to use as row label(s), denoted either by column labels or column
    indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`
    will be formed for the row labels.

    Note: ``index_col=False`` can be used to force pandas to *not* use the first
    column as the index, e.g., when you have a malformed file with delimiters at
    the end of each line.
usecols : Sequence of Hashable or Callable, optional
    Subset of columns to select, denoted either by column labels or column indices.
    If list-like, all elements must either
    be positional (i.e. integer indices into the document columns) or strings
    that correspond to column names provided either by the user in ``names`` or
    inferred from the document header row(s). If ``names`` are given, the document
    header row(s) are not taken into account. For example, a valid list-like
    ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
    Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
    To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order
    preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]``
    for columns in ``['foo', 'bar']`` order or
    ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
    for ``['bar', 'foo']`` order.

    If callable, the callable function will be evaluated against the column
    names, returning names where the callable function evaluates to ``True``. An
    example of a valid callable argument would be ``lambda x: x.upper() in
    ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
    parsing time and lower memory usage.
dtype : dtype or dict of {{Hashable : dtype}}, optional
    Data type(s) to apply to either the whole dataset or individual columns.
    E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}``
    Use ``str`` or ``object`` together with suitable ``na_values`` settings
    to preserve and not interpret ``dtype``.
    If ``converters`` are specified, they will be applied INSTEAD
    of ``dtype`` conversion.

    .. versionadded:: 1.5.0

    Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where
    the default determines the ``dtype`` of the columns which are not explicitly
    listed.
engine : {{'c', 'python', 'pyarrow'}}, optional
    Parser engine to use. The C and pyarrow engines are faster, while the python engine
    is currently more feature-complete. Multithreading is currently only supported by
    the pyarrow engine.

    .. versionadded:: 1.4.0

    The 'pyarrow' engine was added as an *experimental* engine, and some features
    are unsupported, or may not work correctly, with this engine.
converters : dict of {{Hashable : Callable}}, optional
    Functions for converting values in specified columns. Keys can either
    be column labels or column indices.
true_values : list, optional
    Values to consider as ``True`` in addition to case-insensitive variants of 'True'.
false_values : list, optional
    Values to consider as ``False`` in addition to case-insensitive variants of 'False'.
skipinitialspace : bool, default False
    Skip spaces after delimiter.
skiprows : int, list of int or Callable, optional
    Line numbers to skip (0-indexed) or number of lines to skip (``int``)
    at the start of the file.

    If callable, the callable function will be evaluated against the row
    indices, returning ``True`` if the row should be skipped and ``False`` otherwise.
    An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
    Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).
nrows : int, optional
    Number of rows of file to read. Useful for reading pieces of large files.
na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional
    Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific
    per-column ``NA`` values. By default the following values are interpreted as
    ``NaN``: " """
    + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent="    ")
    + """ ".

keep_default_na : bool, default True
    Whether or not to include the default ``NaN`` values when parsing the data.
    Depending on whether ``na_values`` is passed in, the behavior is as follows:

    * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``
      is appended to the default ``NaN`` values used for parsing.
    * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only
      the default ``NaN`` values are used for parsing.
    * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only
      the ``NaN`` values specified ``na_values`` are used for parsing.
    * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no
      strings will be parsed as ``NaN``.

    Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and
    ``na_values`` parameters will be ignored.
na_filter : bool, default True
    Detect missing value markers (empty strings and the value of ``na_values``). In
    data without any ``NA`` values, passing ``na_filter=False`` can improve the
    performance of reading a large file.
verbose : bool, default False
    Indicate number of ``NA`` values placed in non-numeric columns.

    .. deprecated:: 2.2.0
skip_blank_lines : bool, default True
    If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.
parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \
default False
    The behavior is as follows:

    * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to
      ``True`` if ``date_format`` or ``date_parser`` arguments have been passed.
    * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3
      each as a separate date column.
    * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse
      as a single date column. Values are joined with a space before parsing.
    * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call
      result 'foo'. Values are joined with a space before parsing.

    If a column or index cannot be represented as an array of ``datetime``,
    say because of an unparsable value or a mixture of timezones, the column
    or index will be returned unaltered as an ``object`` data type. For
    non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after
    :func:`~pandas.read_csv`.

    Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
    If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the
    format of the ``datetime`` strings in the columns, and if it can be inferred,
    switch to a faster method of parsing them. In some cases this can increase
    the parsing speed by 5-10x.

    .. deprecated:: 2.0.0
        A strict version of this argument is now the default, passing it has no effect.

keep_date_col : bool, default False
    If ``True`` and ``parse_dates`` specifies combining multiple columns then
    keep the original columns.
date_parser : Callable, optional
    Function to use for converting a sequence of string columns to an array of
    ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the
    conversion. pandas will try to call ``date_parser`` in three different ways,
    advancing to the next if an exception occurs: 1) Pass one or more arrays
    (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the
    string values from the columns defined by ``parse_dates`` into a single array
    and pass that; and 3) call ``date_parser`` once for each row using one or
    more strings (corresponding to the columns defined by ``parse_dates``) as
    arguments.

    .. deprecated:: 2.0.0
        Use ``date_format`` instead, or read in as ``object`` and then apply
        :func:`~pandas.to_datetime` as-needed.
date_format : str or dict of column -> format, optional
    Format to use for parsing dates when used in conjunction with ``parse_dates``.
    The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
    `strftime documentation
    <https://docs.python.org/3/library/datetime.html
    #strftime-and-strptime-behavior>`_ for more information on choices, though
    note that :const:`"%f"` will parse all the way up to nanoseconds.
    You can also pass:

    - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
      time string (not necessarily in exactly the same format);
    - "mixed", to infer the format for each element individually. This is risky,
      and you should probably use it along with `dayfirst`.

    .. versionadded:: 2.0.0
dayfirst : bool, default False
    DD/MM format dates, international and European format.
cache_dates : bool, default True
    If ``True``, use a cache of unique, converted dates to apply the ``datetime``
    conversion. May produce significant speed-up when parsing duplicate
    date strings, especially ones with timezone offsets.

iterator : bool, default False
    Return ``TextFileReader`` object for iteration or getting chunks with
    ``get_chunk()``.
chunksize : int, optional
    Number of lines to read from the file per chunk. Passing a value will cause the
    function to return a ``TextFileReader`` object for iteration.
    See the `IO Tools docs
    <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
    for more information on ``iterator`` and ``chunksize``.

{decompression_options}

    .. versionchanged:: 1.4.0 Zstandard support.

thousands : str (length 1), optional
    Character acting as the thousands separator in numerical values.
decimal : str (length 1), default '.'
    Character to recognize as decimal point (e.g., use ',' for European data).
lineterminator : str (length 1), optional
    Character used to denote a line break. Only valid with C parser.
quotechar : str (length 1), optional
    Character used to denote the start and end of a quoted item. Quoted
    items can include the ``delimiter`` and it will be ignored.
quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \
3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL
    Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is
    ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special
    characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,
    or ``lineterminator``.
doublequote : bool, default True
    When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate
    whether or not to interpret two consecutive ``quotechar`` elements INSIDE a
    field as a single ``quotechar`` element.
escapechar : str (length 1), optional
    Character used to escape other characters.
comment : str (length 1), optional
    Character indicating that the remainder of line should not be parsed.
    If found at the beginning
    of a line, the line will be ignored altogether. This parameter must be a
    single character. Like empty lines (as long as ``skip_blank_lines=True``),
    fully commented lines are ignored by the parameter ``header`` but not by
    ``skiprows``. For example, if ``comment='#'``, parsing
    ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being
    treated as the header.
encoding : str, optional, default 'utf-8'
    Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python
    standard encodings
    <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .

encoding_errors : str, optional, default 'strict'
    How encoding errors are treated. `List of possible values
    <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .

    .. versionadded:: 1.3.0

dialect : str or csv.Dialect, optional
    If provided, this parameter will override values (default or not) for the
    following parameters: ``delimiter``, ``doublequote``, ``escapechar``,
    ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to
    override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``
    documentation for more details.
on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error'
    Specifies what to do upon encountering a bad line (a line with too many fields).
    Allowed values are :

    - ``'error'``, raise an Exception when a bad line is encountered.
    - ``'warn'``, raise a warning when a bad line is encountered and skip that line.
    - ``'skip'``, skip bad lines without raising or warning when they are encountered.

    .. versionadded:: 1.3.0

    .. versionadded:: 1.4.0

        - Callable, function with signature
          ``(bad_line: list[str]) -> list[str] | None`` that will process a single
          bad line. ``bad_line`` is a list of strings split by the ``sep``.
          If the function returns ``None``, the bad line will be ignored.
          If the function returns a new ``list`` of strings with more elements than
          expected, a ``ParserWarning`` will be emitted while dropping extra elements.
          Only supported when ``engine='python'``

    .. versionchanged:: 2.2.0

        - Callable, function with signature
          as described in `pyarrow documentation
          <https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html
          #pyarrow.csv.ParseOptions.invalid_row_handler>`_ when ``engine='pyarrow'``

delim_whitespace : bool, default False
    Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be
    used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option
    is set to ``True``, nothing should be passed in for the ``delimiter``
    parameter.

    .. deprecated:: 2.2.0
        Use ``sep="\\s+"`` instead.
low_memory : bool, default True
    Internally process the file in chunks, resulting in lower memory use
    while parsing, but possibly mixed type inference. To ensure no mixed
    types either set ``False``, or specify the type with the ``dtype`` parameter.
    Note that the entire file is read into a single :class:`~pandas.DataFrame`
    regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in
    chunks. (Only valid with C parser).
memory_map : bool, default False
    If a filepath is provided for ``filepath_or_buffer``, map the file object
    directly onto memory and access the data directly from there. Using this
    option can improve performance because there is no longer any I/O overhead.
float_precision : {{'high', 'legacy', 'round_trip'}}, optional
    Specifies which converter the C engine should use for floating-point
    values. The options are ``None`` or ``'high'`` for the ordinary converter,
    ``'legacy'`` for the original lower precision pandas converter, and
    ``'round_trip'`` for the round-trip converter.

{storage_options}

dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
    Back-end data type applied to the resultant :class:`DataFrame`
    (still experimental). Behaviour is as follows:

    * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
      (default).
    * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
      DataFrame.

    .. versionadded:: 2.0

Returns
-------
DataFrame or TextFileReader
    A comma-separated values (csv) file is returned as two-dimensional
    data structure with labeled axes.

See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
{see_also_func_name} : {see_also_func_summary}
read_fwf : Read a table of fixed-width formatted lines into DataFrame.

Examples
--------
>>> pd.{func_name}('data.csv')  # doctest: +SKIP
"""
)


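# Illustrative sketch (editorial addition, not part of the pandas source): a few of
# the parameters documented above -- ``sep``, ``usecols``, ``dtype``, ``parse_dates``
# and ``na_values`` -- exercised against an in-memory buffer instead of a real file.
# The data is made up and the helper is never called by the module.
def _example_read_csv_parameters() -> None:
    from io import StringIO

    import pandas as pd

    data = "date;city;temp\n2024-01-01;Oslo;-3.5\n2024-01-02;Oslo;--\n"
    df = pd.read_csv(
        StringIO(data),
        sep=";",  # non-default delimiter
        usecols=["date", "temp"],  # subset of columns
        dtype={"temp": "float64"},
        parse_dates=["date"],
        na_values=["--"],  # extra NA marker on top of the defaults
    )
    print(df.dtypes)

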
class _C_Parser_Defaults(TypedDict):
    delim_whitespace: Literal[False]
    na_filter: Literal[True]
    low_memory: Literal[True]
    memory_map: Literal[False]
    float_precision: None


_c_parser_defaults: _C_Parser_Defaults = {
    "delim_whitespace": False,
    "na_filter": True,
    "low_memory": True,
    "memory_map": False,
    "float_precision": None,
}


class _Fwf_Defaults(TypedDict):
    colspecs: Literal["infer"]
    infer_nrows: Literal[100]
    widths: None


_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_pyarrow_unsupported = {
    "skipfooter",
    "float_precision",
    "chunksize",
    "comment",
    "nrows",
    "thousands",
    "memory_map",
    "dialect",
    "delim_whitespace",
    "quoting",
    "lineterminator",
    "converters",
    "iterator",
    "dayfirst",
    "verbose",
    "skipinitialspace",
    "low_memory",
}


class _DeprecationConfig(NamedTuple):
    default_value: Any
    msg: str | None


@overload
def validate_integer(name: str, val: None, min_val: int = ...) -> None:
    ...


@overload
def validate_integer(name: str, val: float, min_val: int = ...) -> int:
    ...


@overload
def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None:
    ...


def validate_integer(
    name: str, val: int | float | None, min_val: int = 0
) -> int | None:
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : str
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    if val is None:
        return val

    msg = f"'{name:s}' must be an integer >={min_val:d}"
    if is_float(val):
        if int(val) != val:
            raise ValueError(msg)
        val = int(val)
    elif not (is_integer(val) and val >= min_val):
        raise ValueError(msg)

    return int(val)


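# Illustrative sketch (editorial addition, not part of the pandas source): what
# validate_integer above accepts and rejects. Integer-valued floats are cast, None
# passes through, and anything else below ``min_val`` or non-integral raises. The
# helper is never called by the module.
def _example_validate_integer() -> None:
    assert validate_integer("chunksize", 3, 1) == 3  # plain int passes
    assert validate_integer("chunksize", 3.0, 1) == 3  # integer-valued float is cast
    assert validate_integer("chunksize", None, 1) is None  # None passes through
    for bad in (0, 2.5):
        try:
            validate_integer("chunksize", bad, 1)
        except ValueError:
            pass  # values below min_val and non-integral floats raise

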
def _validate_names(names: Sequence[Hashable] | None) -> None:
    """
    Raise ValueError if the `names` parameter contains duplicates or has an
    invalid data type.

    Parameters
    ----------
    names : array-like or None
        An array containing a list of the names used for the output DataFrame.

    Raises
    ------
    ValueError
        If names are not unique or are not ordered (e.g. set).
    """
    if names is not None:
        if len(names) != len(set(names)):
            raise ValueError("Duplicate names are not allowed.")
        if not (
            is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
        ):
            raise ValueError("Names should be an ordered collection.")


def _read(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
) -> DataFrame | TextFileReader:
    """Generic reader of line files."""
    # if we pass a date_parser and parse_dates=False, we should not parse the
    # dates GH#44366
    if kwds.get("parse_dates", None) is None:
        if (
            kwds.get("date_parser", lib.no_default) is lib.no_default
            and kwds.get("date_format", None) is None
        ):
            kwds["parse_dates"] = False
        else:
            kwds["parse_dates"] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.get("iterator", False)
    chunksize = kwds.get("chunksize", None)
    if kwds.get("engine") == "pyarrow":
        if iterator:
            raise ValueError(
                "The 'iterator' option is not supported with the 'pyarrow' engine"
            )

        if chunksize is not None:
            raise ValueError(
                "The 'chunksize' option is not supported with the 'pyarrow' engine"
            )
    else:
        chunksize = validate_integer("chunksize", chunksize, 1)

    nrows = kwds.get("nrows", None)

    # Check for duplicates in names.
    _validate_names(kwds.get("names", None))

    # Create the parser.
    parser = TextFileReader(filepath_or_buffer, **kwds)

    if chunksize or iterator:
        return parser

    with parser:
        return parser.read(nrows)


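# Illustrative sketch (editorial addition, not part of the pandas source): how the
# ``chunksize``/``iterator`` branch in _read surfaces through the public API -- a
# value for either one makes ``read_csv`` hand back a TextFileReader to iterate over
# instead of a DataFrame. The data is made up; the helper is never called.
def _example_chunked_reading() -> None:
    from io import StringIO

    import pandas as pd

    data = "a,b\n1,2\n3,4\n5,6\n"
    with pd.read_csv(StringIO(data), chunksize=2) as reader:
        for chunk in reader:  # each chunk is a DataFrame of up to 2 rows
            print(chunk.shape)
    whole = pd.read_csv(StringIO(data))  # no chunksize/iterator -> plain DataFrame
    print(type(whole).__name__)

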
# iterator=True -> TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# chunksize=int -> TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# default case -> DataFrame
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


# Unions -> DataFrame | TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame | TextFileReader:
    ...


@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_csv",
        summary="Read a comma-separated values (csv) file into DataFrame.",
        see_also_func_name="read_table",
        see_also_func_summary="Read general delimited file into DataFrame.",
        _default_sep="','",
        storage_options=_shared_docs["storage_options"],
        decompression_options=_shared_docs["decompression_options"]
        % "filepath_or_buffer",
    )
)
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = lib.no_default,
    delimiter: str | None | lib.NoDefault = None,
    # Column and Index Locations and Names
    header: int | Sequence[int] | None | Literal["infer"] = "infer",
    names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
    index_col: IndexLabel | Literal[False] | None = None,
    usecols: UsecolsArgType = None,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine: CSVEngine | None = None,
    converters: Mapping[Hashable, Callable] | None = None,
    true_values: list | None = None,
    false_values: list | None = None,
    skipinitialspace: bool = False,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
    skipfooter: int = 0,
    nrows: int | None = None,
    # NA and Missing Data Handling
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool | lib.NoDefault = lib.no_default,
    skip_blank_lines: bool = True,
    # Datetime Handling
    parse_dates: bool | Sequence[Hashable] | None = None,
    infer_datetime_format: bool | lib.NoDefault = lib.no_default,
    keep_date_col: bool | lib.NoDefault = lib.no_default,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: str | dict[Hashable, str] | None = None,
    dayfirst: bool = False,
    cache_dates: bool = True,
    # Iteration
    iterator: bool = False,
    chunksize: int | None = None,
    # Quoting, Compression, and File Format
    compression: CompressionOptions = "infer",
    thousands: str | None = None,
    decimal: str = ".",
    lineterminator: str | None = None,
    quotechar: str = '"',
    quoting: int = csv.QUOTE_MINIMAL,
    doublequote: bool = True,
    escapechar: str | None = None,
    comment: str | None = None,
    encoding: str | None = None,
    encoding_errors: str | None = "strict",
    dialect: str | csv.Dialect | None = None,
    # Error Handling
    on_bad_lines: str = "error",
    # Internal
    delim_whitespace: bool | lib.NoDefault = lib.no_default,
    low_memory: bool = _c_parser_defaults["low_memory"],
    memory_map: bool = False,
    float_precision: Literal["high", "legacy"] | None = None,
    storage_options: StorageOptions | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
|
942 |
+
) -> DataFrame | TextFileReader:
|
943 |
+
if keep_date_col is not lib.no_default:
|
944 |
+
# GH#55569
|
945 |
+
warnings.warn(
|
946 |
+
"The 'keep_date_col' keyword in pd.read_csv is deprecated and "
|
947 |
+
"will be removed in a future version. Explicitly remove unwanted "
|
948 |
+
"columns after parsing instead.",
|
949 |
+
FutureWarning,
|
950 |
+
stacklevel=find_stack_level(),
|
951 |
+
)
|
952 |
+
else:
|
953 |
+
keep_date_col = False
|
954 |
+
|
955 |
+
if lib.is_list_like(parse_dates):
|
956 |
+
# GH#55569
|
957 |
+
depr = False
|
958 |
+
# error: Item "bool" of "bool | Sequence[Hashable] | None" has no
|
959 |
+
# attribute "__iter__" (not iterable)
|
960 |
+
if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
|
961 |
+
depr = True
|
962 |
+
elif isinstance(parse_dates, dict) and any(
|
963 |
+
lib.is_list_like(x) for x in parse_dates.values()
|
964 |
+
):
|
965 |
+
depr = True
|
966 |
+
if depr:
|
967 |
+
warnings.warn(
|
968 |
+
"Support for nested sequences for 'parse_dates' in pd.read_csv "
|
969 |
+
"is deprecated. Combine the desired columns with pd.to_datetime "
|
970 |
+
"after parsing instead.",
|
971 |
+
FutureWarning,
|
972 |
+
stacklevel=find_stack_level(),
|
973 |
+
)
|
974 |
+
|
975 |
+
if infer_datetime_format is not lib.no_default:
|
976 |
+
warnings.warn(
|
977 |
+
"The argument 'infer_datetime_format' is deprecated and will "
|
978 |
+
"be removed in a future version. "
|
979 |
+
"A strict version of it is now the default, see "
|
980 |
+
"https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
|
981 |
+
"You can safely remove this argument.",
|
982 |
+
FutureWarning,
|
983 |
+
stacklevel=find_stack_level(),
|
984 |
+
)
|
985 |
+
|
986 |
+
if delim_whitespace is not lib.no_default:
|
987 |
+
# GH#55569
|
988 |
+
warnings.warn(
|
989 |
+
"The 'delim_whitespace' keyword in pd.read_csv is deprecated and "
|
990 |
+
"will be removed in a future version. Use ``sep='\\s+'`` instead",
|
991 |
+
FutureWarning,
|
992 |
+
stacklevel=find_stack_level(),
|
993 |
+
)
|
994 |
+
else:
|
995 |
+
delim_whitespace = False
|
996 |
+
|
997 |
+
if verbose is not lib.no_default:
|
998 |
+
# GH#55569
|
999 |
+
warnings.warn(
|
1000 |
+
"The 'verbose' keyword in pd.read_csv is deprecated and "
|
1001 |
+
"will be removed in a future version.",
|
1002 |
+
FutureWarning,
|
1003 |
+
stacklevel=find_stack_level(),
|
1004 |
+
)
|
1005 |
+
else:
|
1006 |
+
verbose = False
|
1007 |
+
|
1008 |
+
# locals() should never be modified
|
1009 |
+
kwds = locals().copy()
|
1010 |
+
del kwds["filepath_or_buffer"]
|
1011 |
+
del kwds["sep"]
|
1012 |
+
|
1013 |
+
kwds_defaults = _refine_defaults_read(
|
1014 |
+
dialect,
|
1015 |
+
delimiter,
|
1016 |
+
delim_whitespace,
|
1017 |
+
engine,
|
1018 |
+
sep,
|
1019 |
+
on_bad_lines,
|
1020 |
+
names,
|
1021 |
+
defaults={"delimiter": ","},
|
1022 |
+
dtype_backend=dtype_backend,
|
1023 |
+
)
|
1024 |
+
kwds.update(kwds_defaults)
|
1025 |
+
|
1026 |
+
return _read(filepath_or_buffer, kwds)
|
1027 |
+
|
1028 |
+
|
1029 |
+
# iterator=True -> TextFileReader
|
1030 |
+
@overload
|
1031 |
+
def read_table(
|
1032 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1033 |
+
*,
|
1034 |
+
sep: str | None | lib.NoDefault = ...,
|
1035 |
+
delimiter: str | None | lib.NoDefault = ...,
|
1036 |
+
header: int | Sequence[int] | None | Literal["infer"] = ...,
|
1037 |
+
names: Sequence[Hashable] | None | lib.NoDefault = ...,
|
1038 |
+
index_col: IndexLabel | Literal[False] | None = ...,
|
1039 |
+
usecols: UsecolsArgType = ...,
|
1040 |
+
dtype: DtypeArg | None = ...,
|
1041 |
+
engine: CSVEngine | None = ...,
|
1042 |
+
converters: Mapping[Hashable, Callable] | None = ...,
|
1043 |
+
true_values: list | None = ...,
|
1044 |
+
false_values: list | None = ...,
|
1045 |
+
skipinitialspace: bool = ...,
|
1046 |
+
skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
|
1047 |
+
skipfooter: int = ...,
|
1048 |
+
nrows: int | None = ...,
|
1049 |
+
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
|
1050 |
+
keep_default_na: bool = ...,
|
1051 |
+
na_filter: bool = ...,
|
1052 |
+
verbose: bool | lib.NoDefault = ...,
|
1053 |
+
skip_blank_lines: bool = ...,
|
1054 |
+
parse_dates: bool | Sequence[Hashable] = ...,
|
1055 |
+
infer_datetime_format: bool | lib.NoDefault = ...,
|
1056 |
+
keep_date_col: bool | lib.NoDefault = ...,
|
1057 |
+
date_parser: Callable | lib.NoDefault = ...,
|
1058 |
+
date_format: str | dict[Hashable, str] | None = ...,
|
1059 |
+
dayfirst: bool = ...,
|
1060 |
+
cache_dates: bool = ...,
|
1061 |
+
iterator: Literal[True],
|
1062 |
+
chunksize: int | None = ...,
|
1063 |
+
compression: CompressionOptions = ...,
|
1064 |
+
thousands: str | None = ...,
|
1065 |
+
decimal: str = ...,
|
1066 |
+
lineterminator: str | None = ...,
|
1067 |
+
quotechar: str = ...,
|
1068 |
+
quoting: int = ...,
|
1069 |
+
doublequote: bool = ...,
|
1070 |
+
escapechar: str | None = ...,
|
1071 |
+
comment: str | None = ...,
|
1072 |
+
encoding: str | None = ...,
|
1073 |
+
encoding_errors: str | None = ...,
|
1074 |
+
dialect: str | csv.Dialect | None = ...,
|
1075 |
+
on_bad_lines=...,
|
1076 |
+
delim_whitespace: bool = ...,
|
1077 |
+
low_memory: bool = ...,
|
1078 |
+
memory_map: bool = ...,
|
1079 |
+
float_precision: str | None = ...,
|
1080 |
+
storage_options: StorageOptions = ...,
|
1081 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1082 |
+
) -> TextFileReader:
|
1083 |
+
...
|
1084 |
+
|
1085 |
+
|
1086 |
+
# chunksize=int -> TextFileReader
|
1087 |
+
@overload
|
1088 |
+
def read_table(
|
1089 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1090 |
+
*,
|
1091 |
+
sep: str | None | lib.NoDefault = ...,
|
1092 |
+
delimiter: str | None | lib.NoDefault = ...,
|
1093 |
+
header: int | Sequence[int] | None | Literal["infer"] = ...,
|
1094 |
+
names: Sequence[Hashable] | None | lib.NoDefault = ...,
|
1095 |
+
index_col: IndexLabel | Literal[False] | None = ...,
|
1096 |
+
usecols: UsecolsArgType = ...,
|
1097 |
+
dtype: DtypeArg | None = ...,
|
1098 |
+
engine: CSVEngine | None = ...,
|
1099 |
+
converters: Mapping[Hashable, Callable] | None = ...,
|
1100 |
+
true_values: list | None = ...,
|
1101 |
+
false_values: list | None = ...,
|
1102 |
+
skipinitialspace: bool = ...,
|
1103 |
+
skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
|
1104 |
+
skipfooter: int = ...,
|
1105 |
+
nrows: int | None = ...,
|
1106 |
+
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
|
1107 |
+
keep_default_na: bool = ...,
|
1108 |
+
na_filter: bool = ...,
|
1109 |
+
verbose: bool | lib.NoDefault = ...,
|
1110 |
+
skip_blank_lines: bool = ...,
|
1111 |
+
parse_dates: bool | Sequence[Hashable] = ...,
|
1112 |
+
infer_datetime_format: bool | lib.NoDefault = ...,
|
1113 |
+
keep_date_col: bool | lib.NoDefault = ...,
|
1114 |
+
date_parser: Callable | lib.NoDefault = ...,
|
1115 |
+
date_format: str | dict[Hashable, str] | None = ...,
|
1116 |
+
dayfirst: bool = ...,
|
1117 |
+
cache_dates: bool = ...,
|
1118 |
+
iterator: bool = ...,
|
1119 |
+
chunksize: int,
|
1120 |
+
compression: CompressionOptions = ...,
|
1121 |
+
thousands: str | None = ...,
|
1122 |
+
decimal: str = ...,
|
1123 |
+
lineterminator: str | None = ...,
|
1124 |
+
quotechar: str = ...,
|
1125 |
+
quoting: int = ...,
|
1126 |
+
doublequote: bool = ...,
|
1127 |
+
escapechar: str | None = ...,
|
1128 |
+
comment: str | None = ...,
|
1129 |
+
encoding: str | None = ...,
|
1130 |
+
encoding_errors: str | None = ...,
|
1131 |
+
dialect: str | csv.Dialect | None = ...,
|
1132 |
+
on_bad_lines=...,
|
1133 |
+
delim_whitespace: bool = ...,
|
1134 |
+
low_memory: bool = ...,
|
1135 |
+
memory_map: bool = ...,
|
1136 |
+
float_precision: str | None = ...,
|
1137 |
+
storage_options: StorageOptions = ...,
|
1138 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1139 |
+
) -> TextFileReader:
|
1140 |
+
...
|
1141 |
+
|
1142 |
+
|
1143 |
+
# default -> DataFrame
|
1144 |
+
@overload
|
1145 |
+
def read_table(
|
1146 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1147 |
+
*,
|
1148 |
+
sep: str | None | lib.NoDefault = ...,
|
1149 |
+
delimiter: str | None | lib.NoDefault = ...,
|
1150 |
+
header: int | Sequence[int] | None | Literal["infer"] = ...,
|
1151 |
+
names: Sequence[Hashable] | None | lib.NoDefault = ...,
|
1152 |
+
index_col: IndexLabel | Literal[False] | None = ...,
|
1153 |
+
usecols: UsecolsArgType = ...,
|
1154 |
+
dtype: DtypeArg | None = ...,
|
1155 |
+
engine: CSVEngine | None = ...,
|
1156 |
+
converters: Mapping[Hashable, Callable] | None = ...,
|
1157 |
+
true_values: list | None = ...,
|
1158 |
+
false_values: list | None = ...,
|
1159 |
+
skipinitialspace: bool = ...,
|
1160 |
+
skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
|
1161 |
+
skipfooter: int = ...,
|
1162 |
+
nrows: int | None = ...,
|
1163 |
+
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
|
1164 |
+
keep_default_na: bool = ...,
|
1165 |
+
na_filter: bool = ...,
|
1166 |
+
verbose: bool | lib.NoDefault = ...,
|
1167 |
+
skip_blank_lines: bool = ...,
|
1168 |
+
parse_dates: bool | Sequence[Hashable] = ...,
|
1169 |
+
infer_datetime_format: bool | lib.NoDefault = ...,
|
1170 |
+
keep_date_col: bool | lib.NoDefault = ...,
|
1171 |
+
date_parser: Callable | lib.NoDefault = ...,
|
1172 |
+
date_format: str | dict[Hashable, str] | None = ...,
|
1173 |
+
dayfirst: bool = ...,
|
1174 |
+
cache_dates: bool = ...,
|
1175 |
+
iterator: Literal[False] = ...,
|
1176 |
+
chunksize: None = ...,
|
1177 |
+
compression: CompressionOptions = ...,
|
1178 |
+
thousands: str | None = ...,
|
1179 |
+
decimal: str = ...,
|
1180 |
+
lineterminator: str | None = ...,
|
1181 |
+
quotechar: str = ...,
|
1182 |
+
quoting: int = ...,
|
1183 |
+
doublequote: bool = ...,
|
1184 |
+
escapechar: str | None = ...,
|
1185 |
+
comment: str | None = ...,
|
1186 |
+
encoding: str | None = ...,
|
1187 |
+
encoding_errors: str | None = ...,
|
1188 |
+
dialect: str | csv.Dialect | None = ...,
|
1189 |
+
on_bad_lines=...,
|
1190 |
+
delim_whitespace: bool = ...,
|
1191 |
+
low_memory: bool = ...,
|
1192 |
+
memory_map: bool = ...,
|
1193 |
+
float_precision: str | None = ...,
|
1194 |
+
storage_options: StorageOptions = ...,
|
1195 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1196 |
+
) -> DataFrame:
|
1197 |
+
...
|
1198 |
+
|
1199 |
+
|
1200 |
+
# Unions -> DataFrame | TextFileReader
|
1201 |
+
@overload
|
1202 |
+
def read_table(
|
1203 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1204 |
+
*,
|
1205 |
+
sep: str | None | lib.NoDefault = ...,
|
1206 |
+
delimiter: str | None | lib.NoDefault = ...,
|
1207 |
+
header: int | Sequence[int] | None | Literal["infer"] = ...,
|
1208 |
+
names: Sequence[Hashable] | None | lib.NoDefault = ...,
|
1209 |
+
index_col: IndexLabel | Literal[False] | None = ...,
|
1210 |
+
usecols: UsecolsArgType = ...,
|
1211 |
+
dtype: DtypeArg | None = ...,
|
1212 |
+
engine: CSVEngine | None = ...,
|
1213 |
+
converters: Mapping[Hashable, Callable] | None = ...,
|
1214 |
+
true_values: list | None = ...,
|
1215 |
+
false_values: list | None = ...,
|
1216 |
+
skipinitialspace: bool = ...,
|
1217 |
+
skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
|
1218 |
+
skipfooter: int = ...,
|
1219 |
+
nrows: int | None = ...,
|
1220 |
+
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
|
1221 |
+
keep_default_na: bool = ...,
|
1222 |
+
na_filter: bool = ...,
|
1223 |
+
verbose: bool | lib.NoDefault = ...,
|
1224 |
+
skip_blank_lines: bool = ...,
|
1225 |
+
parse_dates: bool | Sequence[Hashable] = ...,
|
1226 |
+
infer_datetime_format: bool | lib.NoDefault = ...,
|
1227 |
+
keep_date_col: bool | lib.NoDefault = ...,
|
1228 |
+
date_parser: Callable | lib.NoDefault = ...,
|
1229 |
+
date_format: str | dict[Hashable, str] | None = ...,
|
1230 |
+
dayfirst: bool = ...,
|
1231 |
+
cache_dates: bool = ...,
|
1232 |
+
iterator: bool = ...,
|
1233 |
+
chunksize: int | None = ...,
|
1234 |
+
compression: CompressionOptions = ...,
|
1235 |
+
thousands: str | None = ...,
|
1236 |
+
decimal: str = ...,
|
1237 |
+
lineterminator: str | None = ...,
|
1238 |
+
quotechar: str = ...,
|
1239 |
+
quoting: int = ...,
|
1240 |
+
doublequote: bool = ...,
|
1241 |
+
escapechar: str | None = ...,
|
1242 |
+
comment: str | None = ...,
|
1243 |
+
encoding: str | None = ...,
|
1244 |
+
encoding_errors: str | None = ...,
|
1245 |
+
dialect: str | csv.Dialect | None = ...,
|
1246 |
+
on_bad_lines=...,
|
1247 |
+
delim_whitespace: bool = ...,
|
1248 |
+
low_memory: bool = ...,
|
1249 |
+
memory_map: bool = ...,
|
1250 |
+
float_precision: str | None = ...,
|
1251 |
+
storage_options: StorageOptions = ...,
|
1252 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1253 |
+
) -> DataFrame | TextFileReader:
|
1254 |
+
...
|
1255 |
+
|
1256 |
+
|
1257 |
+
@Appender(
|
1258 |
+
_doc_read_csv_and_table.format(
|
1259 |
+
func_name="read_table",
|
1260 |
+
summary="Read general delimited file into DataFrame.",
|
1261 |
+
see_also_func_name="read_csv",
|
1262 |
+
see_also_func_summary=(
|
1263 |
+
"Read a comma-separated values (csv) file into DataFrame."
|
1264 |
+
),
|
1265 |
+
_default_sep=r"'\\t' (tab-stop)",
|
1266 |
+
storage_options=_shared_docs["storage_options"],
|
1267 |
+
decompression_options=_shared_docs["decompression_options"]
|
1268 |
+
% "filepath_or_buffer",
|
1269 |
+
)
|
1270 |
+
)
|
1271 |
+
def read_table(
|
1272 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1273 |
+
*,
|
1274 |
+
sep: str | None | lib.NoDefault = lib.no_default,
|
1275 |
+
delimiter: str | None | lib.NoDefault = None,
|
1276 |
+
# Column and Index Locations and Names
|
1277 |
+
header: int | Sequence[int] | None | Literal["infer"] = "infer",
|
1278 |
+
names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
|
1279 |
+
index_col: IndexLabel | Literal[False] | None = None,
|
1280 |
+
usecols: UsecolsArgType = None,
|
1281 |
+
# General Parsing Configuration
|
1282 |
+
dtype: DtypeArg | None = None,
|
1283 |
+
engine: CSVEngine | None = None,
|
1284 |
+
converters: Mapping[Hashable, Callable] | None = None,
|
1285 |
+
true_values: list | None = None,
|
1286 |
+
false_values: list | None = None,
|
1287 |
+
skipinitialspace: bool = False,
|
1288 |
+
skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
|
1289 |
+
skipfooter: int = 0,
|
1290 |
+
nrows: int | None = None,
|
1291 |
+
# NA and Missing Data Handling
|
1292 |
+
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None,
|
1293 |
+
keep_default_na: bool = True,
|
1294 |
+
na_filter: bool = True,
|
1295 |
+
verbose: bool | lib.NoDefault = lib.no_default,
|
1296 |
+
skip_blank_lines: bool = True,
|
1297 |
+
# Datetime Handling
|
1298 |
+
parse_dates: bool | Sequence[Hashable] = False,
|
1299 |
+
infer_datetime_format: bool | lib.NoDefault = lib.no_default,
|
1300 |
+
keep_date_col: bool | lib.NoDefault = lib.no_default,
|
1301 |
+
date_parser: Callable | lib.NoDefault = lib.no_default,
|
1302 |
+
date_format: str | dict[Hashable, str] | None = None,
|
1303 |
+
dayfirst: bool = False,
|
1304 |
+
cache_dates: bool = True,
|
1305 |
+
# Iteration
|
1306 |
+
iterator: bool = False,
|
1307 |
+
chunksize: int | None = None,
|
1308 |
+
# Quoting, Compression, and File Format
|
1309 |
+
compression: CompressionOptions = "infer",
|
1310 |
+
thousands: str | None = None,
|
1311 |
+
decimal: str = ".",
|
1312 |
+
lineterminator: str | None = None,
|
1313 |
+
quotechar: str = '"',
|
1314 |
+
quoting: int = csv.QUOTE_MINIMAL,
|
1315 |
+
doublequote: bool = True,
|
1316 |
+
escapechar: str | None = None,
|
1317 |
+
comment: str | None = None,
|
1318 |
+
encoding: str | None = None,
|
1319 |
+
encoding_errors: str | None = "strict",
|
1320 |
+
dialect: str | csv.Dialect | None = None,
|
1321 |
+
# Error Handling
|
1322 |
+
on_bad_lines: str = "error",
|
1323 |
+
# Internal
|
1324 |
+
delim_whitespace: bool | lib.NoDefault = lib.no_default,
|
1325 |
+
low_memory: bool = _c_parser_defaults["low_memory"],
|
1326 |
+
memory_map: bool = False,
|
1327 |
+
float_precision: str | None = None,
|
1328 |
+
storage_options: StorageOptions | None = None,
|
1329 |
+
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
|
1330 |
+
) -> DataFrame | TextFileReader:
|
1331 |
+
if keep_date_col is not lib.no_default:
|
1332 |
+
# GH#55569
|
1333 |
+
warnings.warn(
|
1334 |
+
"The 'keep_date_col' keyword in pd.read_table is deprecated and "
|
1335 |
+
"will be removed in a future version. Explicitly remove unwanted "
|
1336 |
+
"columns after parsing instead.",
|
1337 |
+
FutureWarning,
|
1338 |
+
stacklevel=find_stack_level(),
|
1339 |
+
)
|
1340 |
+
else:
|
1341 |
+
keep_date_col = False
|
1342 |
+
|
1343 |
+
# error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__"
|
1344 |
+
if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
|
1345 |
+
# GH#55569
|
1346 |
+
warnings.warn(
|
1347 |
+
"Support for nested sequences for 'parse_dates' in pd.read_table "
|
1348 |
+
"is deprecated. Combine the desired columns with pd.to_datetime "
|
1349 |
+
"after parsing instead.",
|
1350 |
+
FutureWarning,
|
1351 |
+
stacklevel=find_stack_level(),
|
1352 |
+
)
|
1353 |
+
|
1354 |
+
if infer_datetime_format is not lib.no_default:
|
1355 |
+
warnings.warn(
|
1356 |
+
"The argument 'infer_datetime_format' is deprecated and will "
|
1357 |
+
"be removed in a future version. "
|
1358 |
+
"A strict version of it is now the default, see "
|
1359 |
+
"https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
|
1360 |
+
"You can safely remove this argument.",
|
1361 |
+
FutureWarning,
|
1362 |
+
stacklevel=find_stack_level(),
|
1363 |
+
)
|
1364 |
+
|
1365 |
+
if delim_whitespace is not lib.no_default:
|
1366 |
+
# GH#55569
|
1367 |
+
warnings.warn(
|
1368 |
+
"The 'delim_whitespace' keyword in pd.read_table is deprecated and "
|
1369 |
+
"will be removed in a future version. Use ``sep='\\s+'`` instead",
|
1370 |
+
FutureWarning,
|
1371 |
+
stacklevel=find_stack_level(),
|
1372 |
+
)
|
1373 |
+
else:
|
1374 |
+
delim_whitespace = False
|
1375 |
+
|
1376 |
+
if verbose is not lib.no_default:
|
1377 |
+
# GH#55569
|
1378 |
+
warnings.warn(
|
1379 |
+
"The 'verbose' keyword in pd.read_table is deprecated and "
|
1380 |
+
"will be removed in a future version.",
|
1381 |
+
FutureWarning,
|
1382 |
+
stacklevel=find_stack_level(),
|
1383 |
+
)
|
1384 |
+
else:
|
1385 |
+
verbose = False
|
1386 |
+
|
1387 |
+
# locals() should never be modified
|
1388 |
+
kwds = locals().copy()
|
1389 |
+
del kwds["filepath_or_buffer"]
|
1390 |
+
del kwds["sep"]
|
1391 |
+
|
1392 |
+
kwds_defaults = _refine_defaults_read(
|
1393 |
+
dialect,
|
1394 |
+
delimiter,
|
1395 |
+
delim_whitespace,
|
1396 |
+
engine,
|
1397 |
+
sep,
|
1398 |
+
on_bad_lines,
|
1399 |
+
names,
|
1400 |
+
defaults={"delimiter": "\t"},
|
1401 |
+
dtype_backend=dtype_backend,
|
1402 |
+
)
|
1403 |
+
kwds.update(kwds_defaults)
|
1404 |
+
|
1405 |
+
return _read(filepath_or_buffer, kwds)
|
1406 |
+
|
1407 |
+
|
1408 |
+
@overload
|
1409 |
+
def read_fwf(
|
1410 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1411 |
+
*,
|
1412 |
+
colspecs: Sequence[tuple[int, int]] | str | None = ...,
|
1413 |
+
widths: Sequence[int] | None = ...,
|
1414 |
+
infer_nrows: int = ...,
|
1415 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1416 |
+
iterator: Literal[True],
|
1417 |
+
chunksize: int | None = ...,
|
1418 |
+
**kwds,
|
1419 |
+
) -> TextFileReader:
|
1420 |
+
...
|
1421 |
+
|
1422 |
+
|
1423 |
+
@overload
|
1424 |
+
def read_fwf(
|
1425 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1426 |
+
*,
|
1427 |
+
colspecs: Sequence[tuple[int, int]] | str | None = ...,
|
1428 |
+
widths: Sequence[int] | None = ...,
|
1429 |
+
infer_nrows: int = ...,
|
1430 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1431 |
+
iterator: bool = ...,
|
1432 |
+
chunksize: int,
|
1433 |
+
**kwds,
|
1434 |
+
) -> TextFileReader:
|
1435 |
+
...
|
1436 |
+
|
1437 |
+
|
1438 |
+
@overload
|
1439 |
+
def read_fwf(
|
1440 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1441 |
+
*,
|
1442 |
+
colspecs: Sequence[tuple[int, int]] | str | None = ...,
|
1443 |
+
widths: Sequence[int] | None = ...,
|
1444 |
+
infer_nrows: int = ...,
|
1445 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
1446 |
+
iterator: Literal[False] = ...,
|
1447 |
+
chunksize: None = ...,
|
1448 |
+
**kwds,
|
1449 |
+
) -> DataFrame:
|
1450 |
+
...
|
1451 |
+
|
1452 |
+
|
1453 |
+
def read_fwf(
|
1454 |
+
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
|
1455 |
+
*,
|
1456 |
+
colspecs: Sequence[tuple[int, int]] | str | None = "infer",
|
1457 |
+
widths: Sequence[int] | None = None,
|
1458 |
+
infer_nrows: int = 100,
|
1459 |
+
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
|
1460 |
+
iterator: bool = False,
|
1461 |
+
chunksize: int | None = None,
|
1462 |
+
**kwds,
|
1463 |
+
) -> DataFrame | TextFileReader:
|
1464 |
+
r"""
|
1465 |
+
Read a table of fixed-width formatted lines into DataFrame.
|
1466 |
+
|
1467 |
+
Also supports optionally iterating or breaking of the file
|
1468 |
+
into chunks.
|
1469 |
+
|
1470 |
+
Additional help can be found in the `online docs for IO Tools
|
1471 |
+
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
|
1472 |
+
|
1473 |
+
Parameters
|
1474 |
+
----------
|
1475 |
+
filepath_or_buffer : str, path object, or file-like object
|
1476 |
+
String, path object (implementing ``os.PathLike[str]``), or file-like
|
1477 |
+
object implementing a text ``read()`` function.The string could be a URL.
|
1478 |
+
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
|
1479 |
+
expected. A local file could be:
|
1480 |
+
``file://localhost/path/to/table.csv``.
|
1481 |
+
colspecs : list of tuple (int, int) or 'infer'. optional
|
1482 |
+
A list of tuples giving the extents of the fixed-width
|
1483 |
+
fields of each line as half-open intervals (i.e., [from, to[ ).
|
1484 |
+
String value 'infer' can be used to instruct the parser to try
|
1485 |
+
detecting the column specifications from the first 100 rows of
|
1486 |
+
the data which are not being skipped via skiprows (default='infer').
|
1487 |
+
widths : list of int, optional
|
1488 |
+
A list of field widths which can be used instead of 'colspecs' if
|
1489 |
+
the intervals are contiguous.
|
1490 |
+
infer_nrows : int, default 100
|
1491 |
+
The number of rows to consider when letting the parser determine the
|
1492 |
+
`colspecs`.
|
1493 |
+
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
|
1494 |
+
Back-end data type applied to the resultant :class:`DataFrame`
|
1495 |
+
(still experimental). Behaviour is as follows:
|
1496 |
+
|
1497 |
+
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
|
1498 |
+
(default).
|
1499 |
+
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
|
1500 |
+
DataFrame.
|
1501 |
+
|
1502 |
+
.. versionadded:: 2.0
|
1503 |
+
|
1504 |
+
**kwds : optional
|
1505 |
+
Optional keyword arguments can be passed to ``TextFileReader``.
|
1506 |
+
|
1507 |
+
Returns
|
1508 |
+
-------
|
1509 |
+
DataFrame or TextFileReader
|
1510 |
+
A comma-separated values (csv) file is returned as two-dimensional
|
1511 |
+
data structure with labeled axes.
|
1512 |
+
|
1513 |
+
See Also
|
1514 |
+
--------
|
1515 |
+
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
|
1516 |
+
read_csv : Read a comma-separated values (csv) file into DataFrame.
|
1517 |
+
|
1518 |
+
Examples
|
1519 |
+
--------
|
1520 |
+
>>> pd.read_fwf('data.csv') # doctest: +SKIP
|
1521 |
+
"""
|
1522 |
+
# Check input arguments.
|
1523 |
+
if colspecs is None and widths is None:
|
1524 |
+
raise ValueError("Must specify either colspecs or widths")
|
1525 |
+
if colspecs not in (None, "infer") and widths is not None:
|
1526 |
+
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
|
1527 |
+
|
1528 |
+
# Compute 'colspecs' from 'widths', if specified.
|
1529 |
+
if widths is not None:
|
1530 |
+
colspecs, col = [], 0
|
1531 |
+
for w in widths:
|
1532 |
+
colspecs.append((col, col + w))
|
1533 |
+
col += w
|
1534 |
+
|
1535 |
+
# for mypy
|
1536 |
+
assert colspecs is not None
|
1537 |
+
|
1538 |
+
# GH#40830
|
1539 |
+
# Ensure length of `colspecs` matches length of `names`
|
1540 |
+
names = kwds.get("names")
|
1541 |
+
if names is not None:
|
1542 |
+
if len(names) != len(colspecs) and colspecs != "infer":
|
1543 |
+
# need to check len(index_col) as it might contain
|
1544 |
+
# unnamed indices, in which case it's name is not required
|
1545 |
+
len_index = 0
|
1546 |
+
if kwds.get("index_col") is not None:
|
1547 |
+
index_col: Any = kwds.get("index_col")
|
1548 |
+
if index_col is not False:
|
1549 |
+
if not is_list_like(index_col):
|
1550 |
+
len_index = 1
|
1551 |
+
else:
|
1552 |
+
len_index = len(index_col)
|
1553 |
+
if kwds.get("usecols") is None and len(names) + len_index != len(colspecs):
|
1554 |
+
# If usecols is used colspec may be longer than names
|
1555 |
+
raise ValueError("Length of colspecs must match length of names")
|
1556 |
+
|
1557 |
+
kwds["colspecs"] = colspecs
|
1558 |
+
kwds["infer_nrows"] = infer_nrows
|
1559 |
+
kwds["engine"] = "python-fwf"
|
1560 |
+
kwds["iterator"] = iterator
|
1561 |
+
kwds["chunksize"] = chunksize
|
1562 |
+
|
1563 |
+
check_dtype_backend(dtype_backend)
|
1564 |
+
kwds["dtype_backend"] = dtype_backend
|
1565 |
+
return _read(filepath_or_buffer, kwds)
|
1566 |
+
|
1567 |
+
|
1568 |
+
class TextFileReader(abc.Iterator):
|
1569 |
+
"""
|
1570 |
+
|
1571 |
+
Passed dialect overrides any of the related parser options
|
1572 |
+
|
1573 |
+
"""
|
1574 |
+
|
1575 |
+
def __init__(
|
1576 |
+
self,
|
1577 |
+
f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,
|
1578 |
+
engine: CSVEngine | None = None,
|
1579 |
+
**kwds,
|
1580 |
+
) -> None:
|
1581 |
+
if engine is not None:
|
1582 |
+
engine_specified = True
|
1583 |
+
else:
|
1584 |
+
engine = "python"
|
1585 |
+
engine_specified = False
|
1586 |
+
self.engine = engine
|
1587 |
+
self._engine_specified = kwds.get("engine_specified", engine_specified)
|
1588 |
+
|
1589 |
+
_validate_skipfooter(kwds)
|
1590 |
+
|
1591 |
+
dialect = _extract_dialect(kwds)
|
1592 |
+
if dialect is not None:
|
1593 |
+
if engine == "pyarrow":
|
1594 |
+
raise ValueError(
|
1595 |
+
"The 'dialect' option is not supported with the 'pyarrow' engine"
|
1596 |
+
)
|
1597 |
+
kwds = _merge_with_dialect_properties(dialect, kwds)
|
1598 |
+
|
1599 |
+
if kwds.get("header", "infer") == "infer":
|
1600 |
+
kwds["header"] = 0 if kwds.get("names") is None else None
|
1601 |
+
|
1602 |
+
self.orig_options = kwds
|
1603 |
+
|
1604 |
+
# miscellanea
|
1605 |
+
self._currow = 0
|
1606 |
+
|
1607 |
+
options = self._get_options_with_defaults(engine)
|
1608 |
+
options["storage_options"] = kwds.get("storage_options", None)
|
1609 |
+
|
1610 |
+
self.chunksize = options.pop("chunksize", None)
|
1611 |
+
self.nrows = options.pop("nrows", None)
|
1612 |
+
|
1613 |
+
self._check_file_or_buffer(f, engine)
|
1614 |
+
self.options, self.engine = self._clean_options(options, engine)
|
1615 |
+
|
1616 |
+
if "has_index_names" in kwds:
|
1617 |
+
self.options["has_index_names"] = kwds["has_index_names"]
|
1618 |
+
|
1619 |
+
self.handles: IOHandles | None = None
|
1620 |
+
self._engine = self._make_engine(f, self.engine)
|
1621 |
+
|
1622 |
+
def close(self) -> None:
|
1623 |
+
if self.handles is not None:
|
1624 |
+
self.handles.close()
|
1625 |
+
self._engine.close()
|
1626 |
+
|
1627 |
+
def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:
|
1628 |
+
kwds = self.orig_options
|
1629 |
+
|
1630 |
+
options = {}
|
1631 |
+
default: object | None
|
1632 |
+
|
1633 |
+
for argname, default in parser_defaults.items():
|
1634 |
+
value = kwds.get(argname, default)
|
1635 |
+
|
1636 |
+
# see gh-12935
|
1637 |
+
if (
|
1638 |
+
engine == "pyarrow"
|
1639 |
+
and argname in _pyarrow_unsupported
|
1640 |
+
and value != default
|
1641 |
+
and value != getattr(value, "value", default)
|
1642 |
+
):
|
1643 |
+
raise ValueError(
|
1644 |
+
f"The {repr(argname)} option is not supported with the "
|
1645 |
+
f"'pyarrow' engine"
|
1646 |
+
)
|
1647 |
+
options[argname] = value
|
1648 |
+
|
1649 |
+
for argname, default in _c_parser_defaults.items():
|
1650 |
+
if argname in kwds:
|
1651 |
+
value = kwds[argname]
|
1652 |
+
|
1653 |
+
if engine != "c" and value != default:
|
1654 |
+
# TODO: Refactor this logic, its pretty convoluted
|
1655 |
+
if "python" in engine and argname not in _python_unsupported:
|
1656 |
+
pass
|
1657 |
+
elif "pyarrow" in engine and argname not in _pyarrow_unsupported:
|
1658 |
+
pass
|
1659 |
+
else:
|
1660 |
+
raise ValueError(
|
1661 |
+
f"The {repr(argname)} option is not supported with the "
|
1662 |
+
f"{repr(engine)} engine"
|
1663 |
+
)
|
1664 |
+
else:
|
1665 |
+
value = default
|
1666 |
+
options[argname] = value
|
1667 |
+
|
1668 |
+
if engine == "python-fwf":
|
1669 |
+
for argname, default in _fwf_defaults.items():
|
1670 |
+
options[argname] = kwds.get(argname, default)
|
1671 |
+
|
1672 |
+
return options
|
1673 |
+
|
1674 |
+
def _check_file_or_buffer(self, f, engine: CSVEngine) -> None:
|
1675 |
+
# see gh-16530
|
1676 |
+
if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):
|
1677 |
+
# The C engine doesn't need the file-like to have the "__iter__"
|
1678 |
+
# attribute. However, the Python engine needs "__iter__(...)"
|
1679 |
+
# when iterating through such an object, meaning it
|
1680 |
+
# needs to have that attribute
|
1681 |
+
raise ValueError(
|
1682 |
+
"The 'python' engine cannot iterate through this file buffer."
|
1683 |
+
)
|
1684 |
+
|
1685 |
+
def _clean_options(
|
1686 |
+
self, options: dict[str, Any], engine: CSVEngine
|
1687 |
+
) -> tuple[dict[str, Any], CSVEngine]:
|
1688 |
+
result = options.copy()
|
1689 |
+
|
1690 |
+
fallback_reason = None
|
1691 |
+
|
1692 |
+
# C engine not supported yet
|
1693 |
+
if engine == "c":
|
1694 |
+
if options["skipfooter"] > 0:
|
1695 |
+
fallback_reason = "the 'c' engine does not support skipfooter"
|
1696 |
+
engine = "python"
|
1697 |
+
|
1698 |
+
sep = options["delimiter"]
|
1699 |
+
delim_whitespace = options["delim_whitespace"]
|
1700 |
+
|
1701 |
+
if sep is None and not delim_whitespace:
|
1702 |
+
if engine in ("c", "pyarrow"):
|
1703 |
+
fallback_reason = (
|
1704 |
+
f"the '{engine}' engine does not support "
|
1705 |
+
"sep=None with delim_whitespace=False"
|
1706 |
+
)
|
1707 |
+
engine = "python"
|
1708 |
+
elif sep is not None and len(sep) > 1:
|
1709 |
+
if engine == "c" and sep == r"\s+":
|
1710 |
+
result["delim_whitespace"] = True
|
1711 |
+
del result["delimiter"]
|
1712 |
+
elif engine not in ("python", "python-fwf"):
|
1713 |
+
# wait until regex engine integrated
|
1714 |
+
fallback_reason = (
|
1715 |
+
f"the '{engine}' engine does not support "
|
1716 |
+
"regex separators (separators > 1 char and "
|
1717 |
+
r"different from '\s+' are interpreted as regex)"
|
1718 |
+
)
|
1719 |
+
engine = "python"
|
1720 |
+
elif delim_whitespace:
|
1721 |
+
if "python" in engine:
|
1722 |
+
result["delimiter"] = r"\s+"
|
1723 |
+
elif sep is not None:
|
1724 |
+
encodeable = True
|
1725 |
+
encoding = sys.getfilesystemencoding() or "utf-8"
|
1726 |
+
try:
|
1727 |
+
if len(sep.encode(encoding)) > 1:
|
1728 |
+
encodeable = False
|
1729 |
+
except UnicodeDecodeError:
|
1730 |
+
encodeable = False
|
1731 |
+
if not encodeable and engine not in ("python", "python-fwf"):
|
1732 |
+
fallback_reason = (
|
1733 |
+
f"the separator encoded in {encoding} "
|
1734 |
+
f"is > 1 char long, and the '{engine}' engine "
|
1735 |
+
"does not support such separators"
|
1736 |
+
)
|
1737 |
+
engine = "python"
|
1738 |
+
|
1739 |
+
quotechar = options["quotechar"]
|
1740 |
+
if quotechar is not None and isinstance(quotechar, (str, bytes)):
|
1741 |
+
if (
|
1742 |
+
len(quotechar) == 1
|
1743 |
+
and ord(quotechar) > 127
|
1744 |
+
and engine not in ("python", "python-fwf")
|
1745 |
+
):
|
1746 |
+
fallback_reason = (
|
1747 |
+
"ord(quotechar) > 127, meaning the "
|
1748 |
+
"quotechar is larger than one byte, "
|
1749 |
+
f"and the '{engine}' engine does not support such quotechars"
|
1750 |
+
)
|
1751 |
+
engine = "python"
|
1752 |
+
|
1753 |
+
if fallback_reason and self._engine_specified:
|
1754 |
+
raise ValueError(fallback_reason)
|
1755 |
+
|
1756 |
+
if engine == "c":
|
1757 |
+
for arg in _c_unsupported:
|
1758 |
+
del result[arg]
|
1759 |
+
|
1760 |
+
if "python" in engine:
|
1761 |
+
for arg in _python_unsupported:
|
1762 |
+
if fallback_reason and result[arg] != _c_parser_defaults.get(arg):
|
1763 |
+
raise ValueError(
|
1764 |
+
"Falling back to the 'python' engine because "
|
1765 |
+
f"{fallback_reason}, but this causes {repr(arg)} to be "
|
1766 |
+
"ignored as it is not supported by the 'python' engine."
|
1767 |
+
)
|
1768 |
+
del result[arg]
|
1769 |
+
|
1770 |
+
if fallback_reason:
|
1771 |
+
warnings.warn(
|
1772 |
+
(
|
1773 |
+
"Falling back to the 'python' engine because "
|
1774 |
+
f"{fallback_reason}; you can avoid this warning by specifying "
|
1775 |
+
"engine='python'."
|
1776 |
+
),
|
1777 |
+
ParserWarning,
|
1778 |
+
stacklevel=find_stack_level(),
|
1779 |
+
)
|
1780 |
+
|
1781 |
+
index_col = options["index_col"]
|
1782 |
+
names = options["names"]
|
1783 |
+
converters = options["converters"]
|
1784 |
+
na_values = options["na_values"]
|
1785 |
+
skiprows = options["skiprows"]
|
1786 |
+
|
1787 |
+
validate_header_arg(options["header"])
|
1788 |
+
|
1789 |
+
if index_col is True:
|
1790 |
+
raise ValueError("The value of index_col couldn't be 'True'")
|
1791 |
+
if is_index_col(index_col):
|
1792 |
+
if not isinstance(index_col, (list, tuple, np.ndarray)):
|
1793 |
+
index_col = [index_col]
|
1794 |
+
result["index_col"] = index_col
|
1795 |
+
|
1796 |
+
names = list(names) if names is not None else names
|
1797 |
+
|
1798 |
+
# type conversion-related
|
1799 |
+
if converters is not None:
|
1800 |
+
if not isinstance(converters, dict):
|
1801 |
+
raise TypeError(
|
1802 |
+
"Type converters must be a dict or subclass, "
|
1803 |
+
f"input was a {type(converters).__name__}"
|
1804 |
+
)
|
1805 |
+
else:
|
1806 |
+
converters = {}
|
1807 |
+
|
1808 |
+
# Converting values to NA
|
1809 |
+
keep_default_na = options["keep_default_na"]
|
1810 |
+
floatify = engine != "pyarrow"
|
1811 |
+
na_values, na_fvalues = _clean_na_values(
|
1812 |
+
na_values, keep_default_na, floatify=floatify
|
1813 |
+
)
|
1814 |
+
|
1815 |
+
# handle skiprows; this is internally handled by the
|
1816 |
+
# c-engine, so only need for python and pyarrow parsers
|
1817 |
+
if engine == "pyarrow":
|
1818 |
+
if not is_integer(skiprows) and skiprows is not None:
|
1819 |
+
# pyarrow expects skiprows to be passed as an integer
|
1820 |
+
raise ValueError(
|
1821 |
+
"skiprows argument must be an integer when using "
|
1822 |
+
"engine='pyarrow'"
|
1823 |
+
)
|
1824 |
+
else:
|
1825 |
+
if is_integer(skiprows):
|
1826 |
+
skiprows = list(range(skiprows))
|
1827 |
+
if skiprows is None:
|
1828 |
+
skiprows = set()
|
1829 |
+
elif not callable(skiprows):
|
1830 |
+
skiprows = set(skiprows)
|
1831 |
+
|
1832 |
+
# put stuff back
|
1833 |
+
result["names"] = names
|
1834 |
+
result["converters"] = converters
|
1835 |
+
result["na_values"] = na_values
|
1836 |
+
result["na_fvalues"] = na_fvalues
|
1837 |
+
result["skiprows"] = skiprows
|
1838 |
+
|
1839 |
+
return result, engine
|
1840 |
+
|
1841 |
+
def __next__(self) -> DataFrame:
|
1842 |
+
try:
|
1843 |
+
return self.get_chunk()
|
1844 |
+
except StopIteration:
|
1845 |
+
self.close()
|
1846 |
+
raise
|
1847 |
+
|
1848 |
+
def _make_engine(
|
1849 |
+
self,
|
1850 |
+
f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO,
|
1851 |
+
engine: CSVEngine = "c",
|
1852 |
+
) -> ParserBase:
|
1853 |
+
mapping: dict[str, type[ParserBase]] = {
|
1854 |
+
"c": CParserWrapper,
|
1855 |
+
"python": PythonParser,
|
1856 |
+
"pyarrow": ArrowParserWrapper,
|
1857 |
+
"python-fwf": FixedWidthFieldParser,
|
1858 |
+
}
|
1859 |
+
if engine not in mapping:
|
1860 |
+
raise ValueError(
|
1861 |
+
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
|
1862 |
+
)
|
1863 |
+
if not isinstance(f, list):
|
1864 |
+
# open file here
|
1865 |
+
is_text = True
|
1866 |
+
mode = "r"
|
1867 |
+
if engine == "pyarrow":
|
1868 |
+
is_text = False
|
1869 |
+
mode = "rb"
|
1870 |
+
elif (
|
1871 |
+
engine == "c"
|
1872 |
+
and self.options.get("encoding", "utf-8") == "utf-8"
|
1873 |
+
and isinstance(stringify_path(f), str)
|
1874 |
+
):
|
1875 |
+
# c engine can decode utf-8 bytes, adding TextIOWrapper makes
|
1876 |
+
# the c-engine especially for memory_map=True far slower
|
1877 |
+
is_text = False
|
1878 |
+
if "b" not in mode:
|
1879 |
+
mode += "b"
|
1880 |
+
self.handles = get_handle(
|
1881 |
+
f,
|
1882 |
+
mode,
|
1883 |
+
encoding=self.options.get("encoding", None),
|
1884 |
+
compression=self.options.get("compression", None),
|
1885 |
+
memory_map=self.options.get("memory_map", False),
|
1886 |
+
is_text=is_text,
|
1887 |
+
errors=self.options.get("encoding_errors", "strict"),
|
1888 |
+
storage_options=self.options.get("storage_options", None),
|
1889 |
+
)
|
1890 |
+
assert self.handles is not None
|
1891 |
+
f = self.handles.handle
|
1892 |
+
|
1893 |
+
elif engine != "python":
|
1894 |
+
msg = f"Invalid file path or buffer object type: {type(f)}"
|
1895 |
+
raise ValueError(msg)
|
1896 |
+
|
1897 |
+
try:
|
1898 |
+
return mapping[engine](f, **self.options)
|
1899 |
+
except Exception:
|
1900 |
+
if self.handles is not None:
|
1901 |
+
self.handles.close()
|
1902 |
+
raise
|
1903 |
+
|
1904 |
+
def _failover_to_python(self) -> None:
|
1905 |
+
raise AbstractMethodError(self)
|
1906 |
+
|
1907 |
+
def read(self, nrows: int | None = None) -> DataFrame:
|
1908 |
+
if self.engine == "pyarrow":
|
1909 |
+
try:
|
1910 |
+
# error: "ParserBase" has no attribute "read"
|
1911 |
+
df = self._engine.read() # type: ignore[attr-defined]
|
1912 |
+
except Exception:
|
1913 |
+
self.close()
|
1914 |
+
raise
|
1915 |
+
else:
|
1916 |
+
nrows = validate_integer("nrows", nrows)
|
1917 |
+
try:
|
1918 |
+
# error: "ParserBase" has no attribute "read"
|
1919 |
+
(
|
1920 |
+
index,
|
1921 |
+
columns,
|
1922 |
+
col_dict,
|
1923 |
+
) = self._engine.read( # type: ignore[attr-defined]
|
1924 |
+
nrows
|
1925 |
+
)
|
1926 |
+
except Exception:
|
1927 |
+
self.close()
|
1928 |
+
raise
|
1929 |
+
|
1930 |
+
if index is None:
|
1931 |
+
if col_dict:
|
1932 |
+
# Any column is actually fine:
|
1933 |
+
new_rows = len(next(iter(col_dict.values())))
|
1934 |
+
index = RangeIndex(self._currow, self._currow + new_rows)
|
1935 |
+
else:
|
1936 |
+
new_rows = 0
|
1937 |
+
else:
|
1938 |
+
new_rows = len(index)
|
1939 |
+
|
1940 |
+
if hasattr(self, "orig_options"):
|
1941 |
+
dtype_arg = self.orig_options.get("dtype", None)
|
1942 |
+
else:
|
1943 |
+
dtype_arg = None
|
1944 |
+
|
1945 |
+
if isinstance(dtype_arg, dict):
|
1946 |
+
dtype = defaultdict(lambda: None) # type: ignore[var-annotated]
|
1947 |
+
dtype.update(dtype_arg)
|
1948 |
+
elif dtype_arg is not None and pandas_dtype(dtype_arg) in (
|
1949 |
+
np.str_,
|
1950 |
+
np.object_,
|
1951 |
+
):
|
1952 |
+
dtype = defaultdict(lambda: dtype_arg)
|
1953 |
+
else:
|
1954 |
+
dtype = None
|
1955 |
+
|
1956 |
+
if dtype is not None:
|
1957 |
+
new_col_dict = {}
|
1958 |
+
for k, v in col_dict.items():
|
1959 |
+
d = (
|
1960 |
+
dtype[k]
|
1961 |
+
if pandas_dtype(dtype[k]) in (np.str_, np.object_)
|
1962 |
+
else None
|
1963 |
+
)
|
1964 |
+
new_col_dict[k] = Series(v, index=index, dtype=d, copy=False)
|
1965 |
+
else:
|
1966 |
+
new_col_dict = col_dict
|
1967 |
+
|
1968 |
+
df = DataFrame(
|
1969 |
+
new_col_dict,
|
1970 |
+
columns=columns,
|
1971 |
+
index=index,
|
1972 |
+
copy=not using_copy_on_write(),
|
1973 |
+
)
|
1974 |
+
|
1975 |
+
self._currow += new_rows
|
1976 |
+
return df
|
1977 |
+
|
1978 |
+
def get_chunk(self, size: int | None = None) -> DataFrame:
|
1979 |
+
if size is None:
|
1980 |
+
size = self.chunksize
|
1981 |
+
if self.nrows is not None:
|
1982 |
+
if self._currow >= self.nrows:
|
1983 |
+
raise StopIteration
|
1984 |
+
size = min(size, self.nrows - self._currow)
|
1985 |
+
return self.read(nrows=size)
|
1986 |
+
|
1987 |
+
def __enter__(self) -> Self:
|
1988 |
+
return self
|
1989 |
+
|
1990 |
+
def __exit__(
|
1991 |
+
self,
|
1992 |
+
exc_type: type[BaseException] | None,
|
1993 |
+
exc_value: BaseException | None,
|
1994 |
+
traceback: TracebackType | None,
|
1995 |
+
) -> None:
|
1996 |
+
self.close()
|
1997 |
+
|
1998 |
+
|
1999 |
+
def TextParser(*args, **kwds) -> TextFileReader:
|
2000 |
+
"""
|
2001 |
+
Converts lists of lists/tuples into DataFrames with proper type inference
|
2002 |
+
and optional (e.g. string to datetime) conversion. Also enables iterating
|
2003 |
+
lazily over chunks of large files
|
2004 |
+
|
2005 |
+
Parameters
|
2006 |
+
----------
|
2007 |
+
data : file-like object or list
|
2008 |
+
delimiter : separator character to use
|
2009 |
+
dialect : str or csv.Dialect instance, optional
|
2010 |
+
Ignored if delimiter is longer than 1 character
|
2011 |
+
names : sequence, default
|
2012 |
+
header : int, default 0
|
2013 |
+
Row to use to parse column labels. Defaults to the first row. Prior
|
2014 |
+
rows will be discarded
|
2015 |
+
index_col : int or list, optional
|
2016 |
+
Column or columns to use as the (possibly hierarchical) index
|
2017 |
+
has_index_names: bool, default False
|
2018 |
+
True if the cols defined in index_col have an index name and are
|
2019 |
+
not in the header.
|
2020 |
+
na_values : scalar, str, list-like, or dict, optional
|
2021 |
+
Additional strings to recognize as NA/NaN.
|
2022 |
+
keep_default_na : bool, default True
|
2023 |
+
thousands : str, optional
|
2024 |
+
Thousands separator
|
2025 |
+
comment : str, optional
|
2026 |
+
Comment out remainder of line
|
2027 |
+
parse_dates : bool, default False
|
2028 |
+
keep_date_col : bool, default False
|
2029 |
+
date_parser : function, optional
|
2030 |
+
|
2031 |
+
.. deprecated:: 2.0.0
|
2032 |
+
date_format : str or dict of column -> format, default ``None``
|
2033 |
+
|
2034 |
+
.. versionadded:: 2.0.0
|
2035 |
+
skiprows : list of integers
|
2036 |
+
Row numbers to skip
|
2037 |
+
skipfooter : int
|
2038 |
+
Number of line at bottom of file to skip
|
2039 |
+
converters : dict, optional
|
2040 |
+
Dict of functions for converting values in certain columns. Keys can
|
2041 |
+
either be integers or column labels, values are functions that take one
|
2042 |
+
input argument, the cell (not column) content, and return the
|
2043 |
+
transformed content.
|
2044 |
+
encoding : str, optional
|
2045 |
+
Encoding to use for UTF when reading/writing (ex. 'utf-8')
|
2046 |
+
float_precision : str, optional
|
2047 |
+
Specifies which converter the C engine should use for floating-point
|
2048 |
+
values. The options are `None` or `high` for the ordinary converter,
|
2049 |
+
`legacy` for the original lower precision pandas converter, and
|
2050 |
+
`round_trip` for the round-trip converter.
|
2051 |
+
"""
|
2052 |
+
kwds["engine"] = "python"
|
2053 |
+
return TextFileReader(*args, **kwds)
|
2054 |
+
|
2055 |
+
|
2056 |
+
def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True):
|
2057 |
+
na_fvalues: set | dict
|
2058 |
+
if na_values is None:
|
2059 |
+
if keep_default_na:
|
2060 |
+
na_values = STR_NA_VALUES
|
2061 |
+
else:
|
2062 |
+
na_values = set()
|
2063 |
+
na_fvalues = set()
|
2064 |
+
elif isinstance(na_values, dict):
|
2065 |
+
old_na_values = na_values.copy()
|
2066 |
+
na_values = {} # Prevent aliasing.
|
2067 |
+
|
2068 |
+
# Convert the values in the na_values dictionary
|
2069 |
+
# into array-likes for further use. This is also
|
2070 |
+
# where we append the default NaN values, provided
|
2071 |
+
# that `keep_default_na=True`.
|
2072 |
+
for k, v in old_na_values.items():
|
2073 |
+
if not is_list_like(v):
|
2074 |
+
v = [v]
|
2075 |
+
|
2076 |
+
if keep_default_na:
|
2077 |
+
v = set(v) | STR_NA_VALUES
|
2078 |
+
|
2079 |
+
na_values[k] = v
|
2080 |
+
na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
|
2081 |
+
else:
|
2082 |
+
if not is_list_like(na_values):
|
2083 |
+
na_values = [na_values]
|
2084 |
+
na_values = _stringify_na_values(na_values, floatify)
|
2085 |
+
if keep_default_na:
|
2086 |
+
na_values = na_values | STR_NA_VALUES
|
2087 |
+
|
2088 |
+
na_fvalues = _floatify_na_values(na_values)
|
2089 |
+
|
2090 |
+
return na_values, na_fvalues
|
2091 |
+
|
2092 |
+
|
2093 |
+
def _floatify_na_values(na_values):
|
2094 |
+
# create float versions of the na_values
|
2095 |
+
result = set()
|
2096 |
+
for v in na_values:
|
2097 |
+
try:
|
2098 |
+
v = float(v)
|
2099 |
+
if not np.isnan(v):
|
2100 |
+
result.add(v)
|
2101 |
+
except (TypeError, ValueError, OverflowError):
|
2102 |
+
pass
|
2103 |
+
return result
|
2104 |
+
|
2105 |
+
|
2106 |
+
def _stringify_na_values(na_values, floatify: bool):
|
2107 |
+
"""return a stringified and numeric for these values"""
|
2108 |
+
result: list[str | float] = []
|
2109 |
+
for x in na_values:
|
2110 |
+
result.append(str(x))
|
2111 |
+
result.append(x)
|
2112 |
+
try:
|
2113 |
+
v = float(x)
|
2114 |
+
|
2115 |
+
# we are like 999 here
|
2116 |
+
if v == int(v):
|
2117 |
+
v = int(v)
|
2118 |
+
result.append(f"{v}.0")
|
2119 |
+
result.append(str(v))
|
2120 |
+
|
2121 |
+
if floatify:
|
2122 |
+
result.append(v)
|
2123 |
+
except (TypeError, ValueError, OverflowError):
|
2124 |
+
pass
|
2125 |
+
if floatify:
|
2126 |
+
try:
|
2127 |
+
result.append(int(x))
|
2128 |
+
except (TypeError, ValueError, OverflowError):
|
2129 |
+
pass
|
2130 |
+
return set(result)
|
2131 |
+
|
2132 |
+
|
2133 |
+
def _refine_defaults_read(
|
2134 |
+
dialect: str | csv.Dialect | None,
|
2135 |
+
delimiter: str | None | lib.NoDefault,
|
2136 |
+
delim_whitespace: bool,
|
2137 |
+
engine: CSVEngine | None,
|
2138 |
+
sep: str | None | lib.NoDefault,
|
2139 |
+
on_bad_lines: str | Callable,
|
2140 |
+
names: Sequence[Hashable] | None | lib.NoDefault,
|
2141 |
+
defaults: dict[str, Any],
|
2142 |
+
dtype_backend: DtypeBackend | lib.NoDefault,
|
2143 |
+
):
|
2144 |
+
"""Validate/refine default values of input parameters of read_csv, read_table.
|
2145 |
+
|
2146 |
+
Parameters
|
2147 |
+
----------
|
2148 |
+
dialect : str or csv.Dialect
|
2149 |
+
If provided, this parameter will override values (default or not) for the
|
2150 |
+
following parameters: `delimiter`, `doublequote`, `escapechar`,
|
2151 |
+
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
|
2152 |
+
override values, a ParserWarning will be issued. See csv.Dialect
|
2153 |
+
documentation for more details.
|
2154 |
+
delimiter : str or object
|
2155 |
+
Alias for sep.
|
2156 |
+
delim_whitespace : bool
|
2157 |
+
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
|
2158 |
+
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
|
2159 |
+
is set to True, nothing should be passed in for the ``delimiter``
|
2160 |
+
parameter.
|
2161 |
+
|
2162 |
+
.. deprecated:: 2.2.0
|
2163 |
+
Use ``sep="\\s+"`` instead.
|
2164 |
+
engine : {{'c', 'python'}}
|
2165 |
+
Parser engine to use. The C engine is faster while the python engine is
|
2166 |
+
currently more feature-complete.
|
2167 |
+
sep : str or object
|
2168 |
+
A delimiter provided by the user (str) or a sentinel value, i.e.
|
2169 |
+
pandas._libs.lib.no_default.
|
2170 |
+
on_bad_lines : str, callable
|
2171 |
+
An option for handling bad lines or a sentinel value(None).
|
2172 |
+
names : array-like, optional
|
2173 |
+
List of column names to use. If the file contains a header row,
|
2174 |
+
then you should explicitly pass ``header=0`` to override the column names.
|
2175 |
+
Duplicates in this list are not allowed.
|
2176 |
+
defaults: dict
|
2177 |
+
Default values of input parameters.
|
2178 |
+
|
2179 |
+
Returns
|
2180 |
+
-------
|
2181 |
+
kwds : dict
|
2182 |
+
Input parameters with correct values.
|
2183 |
+
|
2184 |
+
Raises
|
2185 |
+
------
|
2186 |
+
ValueError :
|
2187 |
+
If a delimiter was specified with ``sep`` (or ``delimiter``) and
|
2188 |
+
``delim_whitespace=True``.
|
2189 |
+
"""
|
2190 |
+
# fix types for sep, delimiter to Union(str, Any)
|
2191 |
+
delim_default = defaults["delimiter"]
|
2192 |
+
kwds: dict[str, Any] = {}
|
2193 |
+
# gh-23761
|
2194 |
+
#
|
2195 |
+
# When a dialect is passed, it overrides any of the overlapping
|
2196 |
+
# parameters passed in directly. We don't want to warn if the
|
2197 |
+
# default parameters were passed in (since it probably means
|
2198 |
+
# that the user didn't pass them in explicitly in the first place).
|
2199 |
+
#
|
2200 |
+
# "delimiter" is the annoying corner case because we alias it to
|
2201 |
+
# "sep" before doing comparison to the dialect values later on.
|
2202 |
+
# Thus, we need a flag to indicate that we need to "override"
|
2203 |
+
# the comparison to dialect values by checking if default values
|
2204 |
+
# for BOTH "delimiter" and "sep" were provided.
|
2205 |
+
if dialect is not None:
|
2206 |
+
kwds["sep_override"] = delimiter is None and (
|
2207 |
+
sep is lib.no_default or sep == delim_default
|
2208 |
+
)
|
2209 |
+
|
2210 |
+
if delimiter and (sep is not lib.no_default):
|
2211 |
+
raise ValueError("Specified a sep and a delimiter; you can only specify one.")
|
2212 |
+
|
2213 |
+
kwds["names"] = None if names is lib.no_default else names
|
2214 |
+
|
2215 |
+
# Alias sep -> delimiter.
|
2216 |
+
if delimiter is None:
|
2217 |
+
delimiter = sep
|
2218 |
+
|
2219 |
+
if delim_whitespace and (delimiter is not lib.no_default):
|
2220 |
+
raise ValueError(
|
2221 |
+
"Specified a delimiter with both sep and "
|
2222 |
+
"delim_whitespace=True; you can only specify one."
|
2223 |
+
)
|
2224 |
+
|
2225 |
+
if delimiter == "\n":
|
2226 |
+
raise ValueError(
|
2227 |
+
r"Specified \n as separator or delimiter. This forces the python engine "
|
2228 |
+
"which does not accept a line terminator. Hence it is not allowed to use "
|
2229 |
+
"the line terminator as separator.",
|
2230 |
+
)
|
2231 |
+
|
2232 |
+
if delimiter is lib.no_default:
|
2233 |
+
# assign default separator value
|
2234 |
+
kwds["delimiter"] = delim_default
|
2235 |
+
else:
|
2236 |
+
kwds["delimiter"] = delimiter
|
2237 |
+
|
2238 |
+
if engine is not None:
|
2239 |
+
kwds["engine_specified"] = True
|
2240 |
+
else:
|
2241 |
+
kwds["engine"] = "c"
|
2242 |
+
kwds["engine_specified"] = False
|
2243 |
+
|
2244 |
+
if on_bad_lines == "error":
|
2245 |
+
kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
|
2246 |
+
elif on_bad_lines == "warn":
|
2247 |
+
kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
|
2248 |
+
elif on_bad_lines == "skip":
|
2249 |
+
kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
|
2250 |
+
elif callable(on_bad_lines):
|
2251 |
+
if engine not in ["python", "pyarrow"]:
|
2252 |
+
raise ValueError(
|
2253 |
+
"on_bad_line can only be a callable function "
|
2254 |
+
"if engine='python' or 'pyarrow'"
|
2255 |
+
)
|
2256 |
+
kwds["on_bad_lines"] = on_bad_lines
|
2257 |
+
else:
|
2258 |
+
raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
|
2259 |
+
|
2260 |
+
check_dtype_backend(dtype_backend)
|
2261 |
+
|
2262 |
+
kwds["dtype_backend"] = dtype_backend
|
2263 |
+
|
2264 |
+
return kwds
|
2265 |
+
|
2266 |
+
|
2267 |
+
def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
|
2268 |
+
"""
|
2269 |
+
Extract concrete csv dialect instance.
|
2270 |
+
|
2271 |
+
Returns
|
2272 |
+
-------
|
2273 |
+
csv.Dialect or None
|
2274 |
+
"""
|
2275 |
+
if kwds.get("dialect") is None:
|
2276 |
+
return None
|
2277 |
+
|
2278 |
+
dialect = kwds["dialect"]
|
2279 |
+
if dialect in csv.list_dialects():
|
2280 |
+
dialect = csv.get_dialect(dialect)
|
2281 |
+
|
2282 |
+
_validate_dialect(dialect)
|
2283 |
+
|
2284 |
+
return dialect
|
2285 |
+
|
2286 |
+
|
2287 |
+
MANDATORY_DIALECT_ATTRS = (
|
2288 |
+
"delimiter",
|
2289 |
+
"doublequote",
|
2290 |
+
"escapechar",
|
2291 |
+
"skipinitialspace",
|
2292 |
+
"quotechar",
|
2293 |
+
"quoting",
|
2294 |
+
)
|
2295 |
+
|
2296 |
+
|
2297 |
+
def _validate_dialect(dialect: csv.Dialect) -> None:
|
2298 |
+
"""
|
2299 |
+
Validate csv dialect instance.
|
2300 |
+
|
2301 |
+
Raises
|
2302 |
+
------
|
2303 |
+
ValueError
|
2304 |
+
If incorrect dialect is provided.
|
2305 |
+
"""
|
2306 |
+
for param in MANDATORY_DIALECT_ATTRS:
|
2307 |
+
if not hasattr(dialect, param):
|
2308 |
+
raise ValueError(f"Invalid dialect {dialect} provided")
|
2309 |
+
|
2310 |
+
|
2311 |
+
def _merge_with_dialect_properties(
|
2312 |
+
dialect: csv.Dialect,
|
2313 |
+
defaults: dict[str, Any],
|
2314 |
+
) -> dict[str, Any]:
|
2315 |
+
"""
|
2316 |
+
Merge default kwargs in TextFileReader with dialect parameters.
|
2317 |
+
|
2318 |
+
Parameters
|
2319 |
+
----------
|
2320 |
+
dialect : csv.Dialect
|
2321 |
+
Concrete csv dialect. See csv.Dialect documentation for more details.
|
2322 |
+
defaults : dict
|
2323 |
+
Keyword arguments passed to TextFileReader.
|
2324 |
+
|
2325 |
+
Returns
|
2326 |
+
-------
|
2327 |
+
kwds : dict
|
2328 |
+
Updated keyword arguments, merged with dialect parameters.
|
2329 |
+
"""
|
2330 |
+
kwds = defaults.copy()
|
2331 |
+
|
2332 |
+
for param in MANDATORY_DIALECT_ATTRS:
|
2333 |
+
dialect_val = getattr(dialect, param)
|
2334 |
+
|
2335 |
+
parser_default = parser_defaults[param]
|
2336 |
+
provided = kwds.get(param, parser_default)
|
2337 |
+
|
2338 |
+
# Messages for conflicting values between the dialect
|
2339 |
+
# instance and the actual parameters provided.
|
2340 |
+
conflict_msgs = []
|
2341 |
+
|
2342 |
+
# Don't warn if the default parameter was passed in,
|
2343 |
+
# even if it conflicts with the dialect (gh-23761).
|
2344 |
+
if provided not in (parser_default, dialect_val):
|
2345 |
+
msg = (
|
2346 |
+
f"Conflicting values for '{param}': '{provided}' was "
|
2347 |
+
f"provided, but the dialect specifies '{dialect_val}'. "
|
2348 |
+
"Using the dialect-specified value."
|
2349 |
+
)
|
2350 |
+
|
2351 |
+
# Annoying corner case for not warning about
|
2352 |
+
# conflicts between dialect and delimiter parameter.
|
2353 |
+
# Refer to the outer "_read_" function for more info.
|
2354 |
+
if not (param == "delimiter" and kwds.pop("sep_override", False)):
|
2355 |
+
conflict_msgs.append(msg)
|
2356 |
+
|
2357 |
+
if conflict_msgs:
|
2358 |
+
warnings.warn(
|
2359 |
+
"\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()
|
2360 |
+
)
|
2361 |
+
kwds[param] = dialect_val
|
2362 |
+
return kwds
|
2363 |
+
|
2364 |
+
|
2365 |
+
def _validate_skipfooter(kwds: dict[str, Any]) -> None:
|
2366 |
+
"""
|
2367 |
+
Check whether skipfooter is compatible with other kwargs in TextFileReader.
|
2368 |
+
|
2369 |
+
Parameters
|
2370 |
+
----------
|
2371 |
+
kwds : dict
|
2372 |
+
Keyword arguments passed to TextFileReader.
|
2373 |
+
|
2374 |
+
Raises
|
2375 |
+
------
|
2376 |
+
ValueError
|
2377 |
+
If skipfooter is not compatible with other parameters.
|
2378 |
+
"""
|
2379 |
+
if kwds.get("skipfooter"):
|
2380 |
+
if kwds.get("iterator") or kwds.get("chunksize"):
|
2381 |
+
raise ValueError("'skipfooter' not supported for iteration")
|
2382 |
+
if kwds.get("nrows"):
|
2383 |
+
raise ValueError("'skipfooter' not supported with 'nrows'")
|
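For orientation, the checks above are what callers hit through pandas.read_csv. A minimal sketch of that behaviour (the inline CSV string and the "semi" dialect name are made up for the example):

    import csv
    import io

    import pandas as pd

    csv_text = "a;b\n1;2\n3;4\n"  # tiny illustrative CSV

    # sep and delimiter are aliases, so passing both is rejected up front.
    try:
        pd.read_csv(io.StringIO(csv_text), sep=";", delimiter=";")
    except ValueError as err:
        print(err)  # Specified a sep and a delimiter; you can only specify one.

    # A registered csv dialect can supply the delimiter instead; conflicting
    # explicit keywords would surface as the ParserWarning assembled in
    # _merge_with_dialect_properties above.
    csv.register_dialect("semi", delimiter=";")
    print(pd.read_csv(io.StringIO(csv_text), dialect="semi").shape)  # (2, 2)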
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (181 Bytes).
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc
ADDED
Binary file (7.84 kB).
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc
ADDED
Binary file (1.89 kB).
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_api.py
ADDED
@@ -0,0 +1,383 @@
from __future__ import annotations

import pytest

import pandas as pd
from pandas import api
import pandas._testing as tm
from pandas.api import (
    extensions as api_extensions,
    indexers as api_indexers,
    interchange as api_interchange,
    types as api_types,
    typing as api_typing,
)


class Base:
    def check(self, namespace, expected, ignored=None):
        # see which names are in the namespace, minus optional
        # ignored ones
        # compare vs the expected

        result = sorted(
            f for f in dir(namespace) if not f.startswith("__") and f != "annotations"
        )
        if ignored is not None:
            result = sorted(set(result) - set(ignored))

        expected = sorted(expected)
        tm.assert_almost_equal(result, expected)


class TestPDApi(Base):
    # these are optionally imported based on testing
    # & need to be ignored
    ignored = ["tests", "locale", "conftest", "_version_meson"]

    # top-level sub-packages
    public_lib = [
        "api",
        "arrays",
        "options",
        "test",
        "testing",
        "errors",
        "plotting",
        "io",
        "tseries",
    ]
    private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]

    # misc
    misc = ["IndexSlice", "NaT", "NA"]

    # top-level classes
    classes = [
        "ArrowDtype",
        "Categorical",
        "CategoricalIndex",
        "DataFrame",
        "DateOffset",
        "DatetimeIndex",
        "ExcelFile",
        "ExcelWriter",
        "Flags",
        "Grouper",
        "HDFStore",
        "Index",
        "MultiIndex",
        "Period",
        "PeriodIndex",
        "RangeIndex",
        "Series",
        "SparseDtype",
        "StringDtype",
        "Timedelta",
        "TimedeltaIndex",
        "Timestamp",
        "Interval",
        "IntervalIndex",
        "CategoricalDtype",
        "PeriodDtype",
        "IntervalDtype",
        "DatetimeTZDtype",
        "BooleanDtype",
        "Int8Dtype",
        "Int16Dtype",
        "Int32Dtype",
        "Int64Dtype",
        "UInt8Dtype",
        "UInt16Dtype",
        "UInt32Dtype",
        "UInt64Dtype",
        "Float32Dtype",
        "Float64Dtype",
        "NamedAgg",
    ]

    # these are already deprecated; awaiting removal
    deprecated_classes: list[str] = []

    # external modules exposed in pandas namespace
    modules: list[str] = []

    # top-level functions
    funcs = [
        "array",
        "bdate_range",
        "concat",
        "crosstab",
        "cut",
        "date_range",
        "interval_range",
        "eval",
        "factorize",
        "get_dummies",
        "from_dummies",
        "infer_freq",
        "isna",
        "isnull",
        "lreshape",
        "melt",
        "notna",
        "notnull",
        "offsets",
        "merge",
        "merge_ordered",
        "merge_asof",
        "period_range",
        "pivot",
        "pivot_table",
        "qcut",
        "show_versions",
        "timedelta_range",
        "unique",
        "value_counts",
        "wide_to_long",
    ]

    # top-level option funcs
    funcs_option = [
        "reset_option",
        "describe_option",
        "get_option",
        "option_context",
        "set_option",
        "set_eng_float_format",
    ]

    # top-level read_* funcs
    funcs_read = [
        "read_clipboard",
        "read_csv",
        "read_excel",
        "read_fwf",
        "read_gbq",
        "read_hdf",
        "read_html",
        "read_xml",
        "read_json",
        "read_pickle",
        "read_sas",
        "read_sql",
        "read_sql_query",
        "read_sql_table",
        "read_stata",
        "read_table",
        "read_feather",
        "read_parquet",
        "read_orc",
        "read_spss",
    ]

    # top-level json funcs
    funcs_json = ["json_normalize"]

    # top-level to_* funcs
    funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]

    # top-level to deprecate in the future
    deprecated_funcs_in_future: list[str] = []

    # these are already deprecated; awaiting removal
    deprecated_funcs: list[str] = []

    # private modules in pandas namespace
    private_modules = [
        "_config",
        "_libs",
        "_is_numpy_dev",
        "_pandas_datetime_CAPI",
        "_pandas_parser_CAPI",
        "_testing",
        "_typing",
    ]
    if not pd._built_with_meson:
        private_modules.append("_version")

    def test_api(self):
        checkthese = (
            self.public_lib
            + self.private_lib
            + self.misc
            + self.modules
            + self.classes
            + self.funcs
            + self.funcs_option
            + self.funcs_read
            + self.funcs_json
            + self.funcs_to
            + self.private_modules
        )
        self.check(namespace=pd, expected=checkthese, ignored=self.ignored)

    def test_api_all(self):
        expected = set(
            self.public_lib
            + self.misc
            + self.modules
            + self.classes
            + self.funcs
            + self.funcs_option
            + self.funcs_read
            + self.funcs_json
            + self.funcs_to
        ) - set(self.deprecated_classes)
        actual = set(pd.__all__)

        extraneous = actual - expected
        assert not extraneous

        missing = expected - actual
        assert not missing

    def test_depr(self):
        deprecated_list = (
            self.deprecated_classes
            + self.deprecated_funcs
            + self.deprecated_funcs_in_future
        )
        for depr in deprecated_list:
            with tm.assert_produces_warning(FutureWarning):
                _ = getattr(pd, depr)


class TestApi(Base):
    allowed_api_dirs = [
        "types",
        "extensions",
        "indexers",
        "interchange",
        "typing",
    ]
    allowed_typing = [
        "DataFrameGroupBy",
        "DatetimeIndexResamplerGroupby",
        "Expanding",
        "ExpandingGroupby",
        "ExponentialMovingWindow",
        "ExponentialMovingWindowGroupby",
        "JsonReader",
        "NaTType",
        "NAType",
        "PeriodIndexResamplerGroupby",
        "Resampler",
        "Rolling",
        "RollingGroupby",
        "SeriesGroupBy",
        "StataReader",
        "TimedeltaIndexResamplerGroupby",
        "TimeGrouper",
        "Window",
    ]
    allowed_api_types = [
        "is_any_real_numeric_dtype",
        "is_array_like",
        "is_bool",
        "is_bool_dtype",
        "is_categorical_dtype",
        "is_complex",
        "is_complex_dtype",
        "is_datetime64_any_dtype",
        "is_datetime64_dtype",
        "is_datetime64_ns_dtype",
        "is_datetime64tz_dtype",
        "is_dict_like",
        "is_dtype_equal",
        "is_extension_array_dtype",
        "is_file_like",
        "is_float",
        "is_float_dtype",
        "is_hashable",
        "is_int64_dtype",
        "is_integer",
        "is_integer_dtype",
        "is_interval",
        "is_interval_dtype",
        "is_iterator",
        "is_list_like",
        "is_named_tuple",
        "is_number",
        "is_numeric_dtype",
        "is_object_dtype",
        "is_period_dtype",
        "is_re",
        "is_re_compilable",
        "is_scalar",
        "is_signed_integer_dtype",
        "is_sparse",
        "is_string_dtype",
        "is_timedelta64_dtype",
        "is_timedelta64_ns_dtype",
        "is_unsigned_integer_dtype",
        "pandas_dtype",
        "infer_dtype",
        "union_categoricals",
        "CategoricalDtype",
        "DatetimeTZDtype",
        "IntervalDtype",
        "PeriodDtype",
    ]
    allowed_api_interchange = ["from_dataframe", "DataFrame"]
    allowed_api_indexers = [
        "check_array_indexer",
        "BaseIndexer",
        "FixedForwardWindowIndexer",
        "VariableOffsetWindowIndexer",
    ]
    allowed_api_extensions = [
        "no_default",
        "ExtensionDtype",
        "register_extension_dtype",
        "register_dataframe_accessor",
        "register_index_accessor",
        "register_series_accessor",
        "take",
        "ExtensionArray",
        "ExtensionScalarOpsMixin",
    ]

    def test_api(self):
        self.check(api, self.allowed_api_dirs)

    def test_api_typing(self):
        self.check(api_typing, self.allowed_typing)

    def test_api_types(self):
        self.check(api_types, self.allowed_api_types)

    def test_api_interchange(self):
        self.check(api_interchange, self.allowed_api_interchange)

    def test_api_indexers(self):
        self.check(api_indexers, self.allowed_api_indexers)

    def test_api_extensions(self):
        self.check(api_extensions, self.allowed_api_extensions)


class TestTesting(Base):
    funcs = [
        "assert_frame_equal",
        "assert_series_equal",
        "assert_index_equal",
        "assert_extension_array_equal",
    ]

    def test_testing(self):
        from pandas import testing

        self.check(testing, self.funcs)

    def test_util_in_top_level(self):
        with pytest.raises(AttributeError, match="foo"):
            pd.util.foo


def test_pandas_array_alias():
    msg = "PandasArray has been renamed NumpyExtensionArray"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res = pd.arrays.PandasArray

    assert res is pd.arrays.NumpyExtensionArray
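The Base.check helper above boils down to comparing dir() of a namespace against an allow-list. A standalone sketch of the same idea (the expected list below names only the pandas.api sub-packages used in this file, not an authoritative inventory):

    import pandas as pd

    def audit(namespace, expected, ignored=()):
        # names actually exposed, minus dunders and explicitly ignored entries
        found = {n for n in dir(namespace) if not n.startswith("__")} - set(ignored)
        return sorted(found - set(expected)), sorted(set(expected) - found)

    extra, missing = audit(
        pd.api, ["extensions", "indexers", "interchange", "types", "typing"]
    )
    print(extra, missing)  # both lists should ideally be empty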
env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_types.py
ADDED
@@ -0,0 +1,62 @@
from __future__ import annotations

import pandas._testing as tm
from pandas.api import types
from pandas.tests.api.test_api import Base


class TestTypes(Base):
    allowed = [
        "is_any_real_numeric_dtype",
        "is_bool",
        "is_bool_dtype",
        "is_categorical_dtype",
        "is_complex",
        "is_complex_dtype",
        "is_datetime64_any_dtype",
        "is_datetime64_dtype",
        "is_datetime64_ns_dtype",
        "is_datetime64tz_dtype",
        "is_dtype_equal",
        "is_float",
        "is_float_dtype",
        "is_int64_dtype",
        "is_integer",
        "is_integer_dtype",
        "is_number",
        "is_numeric_dtype",
        "is_object_dtype",
        "is_scalar",
        "is_sparse",
        "is_string_dtype",
        "is_signed_integer_dtype",
        "is_timedelta64_dtype",
        "is_timedelta64_ns_dtype",
        "is_unsigned_integer_dtype",
        "is_period_dtype",
        "is_interval",
        "is_interval_dtype",
        "is_re",
        "is_re_compilable",
        "is_dict_like",
        "is_iterator",
        "is_file_like",
        "is_list_like",
        "is_hashable",
        "is_array_like",
        "is_named_tuple",
        "pandas_dtype",
        "union_categoricals",
        "infer_dtype",
        "is_extension_array_dtype",
    ]
    deprecated: list[str] = []
    dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"]

    def test_types(self):
        self.check(types, self.allowed + self.dtypes + self.deprecated)

    def test_deprecated_from_api_types(self):
        for t in self.deprecated:
            with tm.assert_produces_warning(FutureWarning):
                getattr(types, t)(1)
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py
ADDED
@@ -0,0 +1,89 @@
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm


@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]])
def test_factorize(categories, ordered):
    cat = pd.Categorical(
        ["b", "b", "a", "c", None], categories=categories, ordered=ordered
    )
    codes, uniques = pd.factorize(cat)
    expected_codes = np.array([0, 0, 1, 2, -1], dtype=np.intp)
    expected_uniques = pd.Categorical(
        ["b", "a", "c"], categories=categories, ordered=ordered
    )

    tm.assert_numpy_array_equal(codes, expected_codes)
    tm.assert_categorical_equal(uniques, expected_uniques)


def test_factorized_sort():
    cat = pd.Categorical(["b", "b", None, "a"])
    codes, uniques = pd.factorize(cat, sort=True)
    expected_codes = np.array([1, 1, -1, 0], dtype=np.intp)
    expected_uniques = pd.Categorical(["a", "b"])

    tm.assert_numpy_array_equal(codes, expected_codes)
    tm.assert_categorical_equal(uniques, expected_uniques)


def test_factorized_sort_ordered():
    cat = pd.Categorical(
        ["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True
    )

    codes, uniques = pd.factorize(cat, sort=True)
    expected_codes = np.array([0, 0, -1, 1], dtype=np.intp)
    expected_uniques = pd.Categorical(
        ["b", "a"], categories=["c", "b", "a"], ordered=True
    )

    tm.assert_numpy_array_equal(codes, expected_codes)
    tm.assert_categorical_equal(uniques, expected_uniques)


def test_isin_cats():
    # GH2003
    cat = pd.Categorical(["a", "b", np.nan])

    result = cat.isin(["a", np.nan])
    expected = np.array([True, False, True], dtype=bool)
    tm.assert_numpy_array_equal(expected, result)

    result = cat.isin(["a", "c"])
    expected = np.array([True, False, False], dtype=bool)
    tm.assert_numpy_array_equal(expected, result)


@pytest.mark.parametrize("value", [[""], [None, ""], [pd.NaT, ""]])
def test_isin_cats_corner_cases(value):
    # GH36550
    cat = pd.Categorical([""])
    result = cat.isin(value)
    expected = np.array([True], dtype=bool)
    tm.assert_numpy_array_equal(expected, result)


@pytest.mark.parametrize("empty", [[], pd.Series(dtype=object), np.array([])])
def test_isin_empty(empty):
    s = pd.Categorical(["a", "b"])
    expected = np.array([False, False], dtype=bool)

    result = s.isin(empty)
    tm.assert_numpy_array_equal(expected, result)


def test_diff():
    ser = pd.Series([1, 2, 3], dtype="category")

    msg = "Convert to a suitable dtype"
    with pytest.raises(TypeError, match=msg):
        ser.diff()

    df = ser.to_frame(name="A")
    with pytest.raises(TypeError, match=msg):
        df.diff()
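The factorize behaviour exercised above is easy to reproduce interactively; a minimal sketch:

    import pandas as pd

    cat = pd.Categorical(["b", "b", None, "a"])
    codes, uniques = pd.factorize(cat, sort=True)
    print(codes)    # [ 1  1 -1  0] -- missing values are encoded as -1
    print(uniques)  # ['a', 'b'] as a Categorical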
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py
ADDED
@@ -0,0 +1,349 @@
import re
import sys

import numpy as np
import pytest

from pandas.compat import PYPY

from pandas import (
    Categorical,
    CategoricalDtype,
    DataFrame,
    Index,
    NaT,
    Series,
    date_range,
)
import pandas._testing as tm
from pandas.api.types import is_scalar


class TestCategoricalAnalytics:
    @pytest.mark.parametrize("aggregation", ["min", "max"])
    def test_min_max_not_ordered_raises(self, aggregation):
        # unordered cats have no min/max
        cat = Categorical(["a", "b", "c", "d"], ordered=False)
        msg = f"Categorical is not ordered for operation {aggregation}"
        agg_func = getattr(cat, aggregation)

        with pytest.raises(TypeError, match=msg):
            agg_func()

        ufunc = np.minimum if aggregation == "min" else np.maximum
        with pytest.raises(TypeError, match=msg):
            ufunc.reduce(cat)

    def test_min_max_ordered(self, index_or_series_or_array):
        cat = Categorical(["a", "b", "c", "d"], ordered=True)
        obj = index_or_series_or_array(cat)
        _min = obj.min()
        _max = obj.max()
        assert _min == "a"
        assert _max == "d"

        assert np.minimum.reduce(obj) == "a"
        assert np.maximum.reduce(obj) == "d"
        # TODO: raises if we pass axis=0 (on Index and Categorical, not Series)

        cat = Categorical(
            ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
        )
        obj = index_or_series_or_array(cat)
        _min = obj.min()
        _max = obj.max()
        assert _min == "d"
        assert _max == "a"
        assert np.minimum.reduce(obj) == "d"
        assert np.maximum.reduce(obj) == "a"

    def test_min_max_reduce(self):
        # GH52788
        cat = Categorical(["a", "b", "c", "d"], ordered=True)
        df = DataFrame(cat)

        result_max = df.agg("max")
        expected_max = Series(Categorical(["d"], dtype=cat.dtype))
        tm.assert_series_equal(result_max, expected_max)

        result_min = df.agg("min")
        expected_min = Series(Categorical(["a"], dtype=cat.dtype))
        tm.assert_series_equal(result_min, expected_min)

    @pytest.mark.parametrize(
        "categories,expected",
        [
            (list("ABC"), np.nan),
            ([1, 2, 3], np.nan),
            pytest.param(
                Series(date_range("2020-01-01", periods=3), dtype="category"),
                NaT,
                marks=pytest.mark.xfail(
                    reason="https://github.com/pandas-dev/pandas/issues/29962"
                ),
            ),
        ],
    )
    @pytest.mark.parametrize("aggregation", ["min", "max"])
    def test_min_max_ordered_empty(self, categories, expected, aggregation):
        # GH 30227
        cat = Categorical([], categories=categories, ordered=True)

        agg_func = getattr(cat, aggregation)
        result = agg_func()
        assert result is expected

    @pytest.mark.parametrize(
        "values, categories",
        [(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
    )
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize("function", ["min", "max"])
    def test_min_max_with_nan(self, values, categories, function, skipna):
        # GH 25303
        cat = Categorical(values, categories=categories, ordered=True)
        result = getattr(cat, function)(skipna=skipna)

        if skipna is False:
            assert result is np.nan
        else:
            expected = categories[0] if function == "min" else categories[2]
            assert result == expected

    @pytest.mark.parametrize("function", ["min", "max"])
    @pytest.mark.parametrize("skipna", [True, False])
    def test_min_max_only_nan(self, function, skipna):
        # https://github.com/pandas-dev/pandas/issues/33450
        cat = Categorical([np.nan], categories=[1, 2], ordered=True)
        result = getattr(cat, function)(skipna=skipna)
        assert result is np.nan

    @pytest.mark.parametrize("method", ["min", "max"])
    def test_numeric_only_min_max_raises(self, method):
        # GH 25303
        cat = Categorical(
            [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
        )
        with pytest.raises(TypeError, match=".* got an unexpected keyword"):
            getattr(cat, method)(numeric_only=True)

    @pytest.mark.parametrize("method", ["min", "max"])
    def test_numpy_min_max_raises(self, method):
        cat = Categorical(["a", "b", "c", "b"], ordered=False)
        msg = (
            f"Categorical is not ordered for operation {method}\n"
            "you can use .as_ordered() to change the Categorical to an ordered one"
        )
        method = getattr(np, method)
        with pytest.raises(TypeError, match=re.escape(msg)):
            method(cat)

    @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
    @pytest.mark.parametrize("method", ["min", "max"])
    def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
        cat = Categorical(["a", "b", "c", "b"], ordered=True)
        msg = (
            f"the '{kwarg}' parameter is not supported in the pandas implementation "
            f"of {method}"
        )
        if kwarg == "axis":
            msg = r"`axis` must be fewer than the number of dimensions \(1\)"
        kwargs = {kwarg: 42}
        method = getattr(np, method)
        with pytest.raises(ValueError, match=msg):
            method(cat, **kwargs)

    @pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
    def test_numpy_min_max_axis_equals_none(self, method, expected):
        cat = Categorical(["a", "b", "c", "b"], ordered=True)
        method = getattr(np, method)
        result = method(cat, axis=None)
        assert result == expected

    @pytest.mark.parametrize(
        "values,categories,exp_mode",
        [
            ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
            ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
            ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
            ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
            ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
            ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
        ],
    )
    def test_mode(self, values, categories, exp_mode):
        cat = Categorical(values, categories=categories, ordered=True)
        res = Series(cat).mode()._values
        exp = Categorical(exp_mode, categories=categories, ordered=True)
        tm.assert_categorical_equal(res, exp)

    def test_searchsorted(self, ordered):
        # https://github.com/pandas-dev/pandas/issues/8420
        # https://github.com/pandas-dev/pandas/issues/14522

        cat = Categorical(
            ["cheese", "milk", "apple", "bread", "bread"],
            categories=["cheese", "milk", "apple", "bread"],
            ordered=ordered,
        )
        ser = Series(cat)

        # Searching for single item argument, side='left' (default)
        res_cat = cat.searchsorted("apple")
        assert res_cat == 2
        assert is_scalar(res_cat)

        res_ser = ser.searchsorted("apple")
        assert res_ser == 2
        assert is_scalar(res_ser)

        # Searching for single item array, side='left' (default)
        res_cat = cat.searchsorted(["bread"])
        res_ser = ser.searchsorted(["bread"])
        exp = np.array([3], dtype=np.intp)
        tm.assert_numpy_array_equal(res_cat, exp)
        tm.assert_numpy_array_equal(res_ser, exp)

        # Searching for several items array, side='right'
        res_cat = cat.searchsorted(["apple", "bread"], side="right")
        res_ser = ser.searchsorted(["apple", "bread"], side="right")
        exp = np.array([3, 5], dtype=np.intp)
        tm.assert_numpy_array_equal(res_cat, exp)
        tm.assert_numpy_array_equal(res_ser, exp)

        # Searching for a single value that is not from the Categorical
        with pytest.raises(TypeError, match="cucumber"):
            cat.searchsorted("cucumber")
        with pytest.raises(TypeError, match="cucumber"):
            ser.searchsorted("cucumber")

        # Searching for multiple values one of each is not from the Categorical
        msg = (
            "Cannot setitem on a Categorical with a new category, "
            "set the categories first"
        )
        with pytest.raises(TypeError, match=msg):
            cat.searchsorted(["bread", "cucumber"])
        with pytest.raises(TypeError, match=msg):
            ser.searchsorted(["bread", "cucumber"])

    def test_unique(self, ordered):
        # GH38140
        dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered)

        # categories are reordered based on value when ordered=False
        cat = Categorical(["a", "b", "c"], dtype=dtype)
        res = cat.unique()
        tm.assert_categorical_equal(res, cat)

        cat = Categorical(["a", "b", "a", "a"], dtype=dtype)
        res = cat.unique()
        tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype))

        cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype)
        res = cat.unique()
        exp_cat = Categorical(["c", "a", "b"], dtype=dtype)
        tm.assert_categorical_equal(res, exp_cat)

        # nan must be removed
        cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype)
        res = cat.unique()
        exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype)
        tm.assert_categorical_equal(res, exp_cat)

    def test_unique_index_series(self, ordered):
        # GH38140
        dtype = CategoricalDtype([3, 2, 1], ordered=ordered)

        c = Categorical([3, 1, 2, 2, 1], dtype=dtype)
        # Categorical.unique sorts categories by appearance order
        # if ordered=False
        exp = Categorical([3, 1, 2], dtype=dtype)
        tm.assert_categorical_equal(c.unique(), exp)

        tm.assert_index_equal(Index(c).unique(), Index(exp))
        tm.assert_categorical_equal(Series(c).unique(), exp)

        c = Categorical([1, 1, 2, 2], dtype=dtype)
        exp = Categorical([1, 2], dtype=dtype)
        tm.assert_categorical_equal(c.unique(), exp)
        tm.assert_index_equal(Index(c).unique(), Index(exp))
        tm.assert_categorical_equal(Series(c).unique(), exp)

    def test_shift(self):
        # GH 9416
        cat = Categorical(["a", "b", "c", "d", "a"])

        # shift forward
        sp1 = cat.shift(1)
        xp1 = Categorical([np.nan, "a", "b", "c", "d"])
        tm.assert_categorical_equal(sp1, xp1)
        tm.assert_categorical_equal(cat[:-1], sp1[1:])

        # shift back
        sn2 = cat.shift(-2)
        xp2 = Categorical(
            ["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"]
        )
        tm.assert_categorical_equal(sn2, xp2)
        tm.assert_categorical_equal(cat[2:], sn2[:-2])

        # shift by zero
        tm.assert_categorical_equal(cat, cat.shift(0))

    def test_nbytes(self):
        cat = Categorical([1, 2, 3])
        exp = 3 + 3 * 8  # 3 int8s for values + 3 int64s for categories
        assert cat.nbytes == exp

    def test_memory_usage(self):
        cat = Categorical([1, 2, 3])

        # .categories is an index, so we include the hashtable
        assert 0 < cat.nbytes <= cat.memory_usage()
        assert 0 < cat.nbytes <= cat.memory_usage(deep=True)

        cat = Categorical(["foo", "foo", "bar"])
        assert cat.memory_usage(deep=True) > cat.nbytes

        if not PYPY:
            # sys.getsizeof will call the .memory_usage with
            # deep=True, and add on some GC overhead
            diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
            assert abs(diff) < 100

    def test_map(self):
        c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True)
        result = c.map(lambda x: x.lower(), na_action=None)
        exp = Categorical(list("ababc"), categories=list("cba"), ordered=True)
        tm.assert_categorical_equal(result, exp)

        c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False)
        result = c.map(lambda x: x.lower(), na_action=None)
        exp = Categorical(list("ababc"), categories=list("abc"), ordered=False)
        tm.assert_categorical_equal(result, exp)

        result = c.map(lambda x: 1, na_action=None)
        # GH 12766: Return an index not an array
        tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))

    @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
    def test_validate_inplace_raises(self, value):
        cat = Categorical(["A", "B", "B", "C", "A"])
        msg = (
            'For argument "inplace" expected type bool, '
            f"received type {type(value).__name__}"
        )

        with pytest.raises(ValueError, match=msg):
            cat.sort_values(inplace=value)

    def test_quantile_empty(self):
        # make sure we have correct itemsize on resulting codes
        cat = Categorical(["A", "B"])
        idx = Index([0.0, 0.5])
        result = cat[:0]._quantile(idx, interpolation="linear")
        assert result._codes.dtype == np.int8

        expected = cat.take([-1, -1], allow_fill=True)
        tm.assert_extension_array_equal(result, expected)
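As a quick reference for the ordered/unordered distinction tested above, a minimal sketch:

    import pandas as pd

    ordered = pd.Categorical(["a", "b", "c"], ordered=True)
    print(ordered.min(), ordered.max())  # a c

    unordered = pd.Categorical(["a", "b", "c"])
    try:
        unordered.min()
    except TypeError as err:
        print(err)  # Categorical is not ordered for operation min ...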
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py
ADDED
@@ -0,0 +1,216 @@
import collections

import numpy as np
import pytest

from pandas.core.dtypes.dtypes import CategoricalDtype

import pandas as pd
from pandas import (
    Categorical,
    DataFrame,
    Index,
    Series,
    isna,
)
import pandas._testing as tm


class TestCategoricalMissing:
    def test_isna(self):
        exp = np.array([False, False, True])
        cat = Categorical(["a", "b", np.nan])
        res = cat.isna()

        tm.assert_numpy_array_equal(res, exp)

    def test_na_flags_int_categories(self):
        # #1457

        categories = list(range(10))
        labels = np.random.default_rng(2).integers(0, 10, 20)
        labels[::5] = -1

        cat = Categorical(labels, categories)
        repr(cat)

        tm.assert_numpy_array_equal(isna(cat), labels == -1)

    def test_nan_handling(self):
        # Nans are represented as -1 in codes
        c = Categorical(["a", "b", np.nan, "a"])
        tm.assert_index_equal(c.categories, Index(["a", "b"]))
        tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
        c[1] = np.nan
        tm.assert_index_equal(c.categories, Index(["a", "b"]))
        tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))

        # Adding nan to categories should make assigned nan point to the
        # category!
        c = Categorical(["a", "b", np.nan, "a"])
        tm.assert_index_equal(c.categories, Index(["a", "b"]))
        tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))

    def test_set_dtype_nans(self):
        c = Categorical(["a", "b", np.nan])
        result = c._set_dtype(CategoricalDtype(["a", "c"]))
        tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))

    def test_set_item_nan(self):
        cat = Categorical([1, 2, 3])
        cat[1] = np.nan

        exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
        tm.assert_categorical_equal(cat, exp)

    @pytest.mark.parametrize(
        "fillna_kwargs, msg",
        [
            (
                {"value": 1, "method": "ffill"},
                "Cannot specify both 'value' and 'method'.",
            ),
            ({}, "Must specify a fill 'value' or 'method'."),
            ({"method": "bad"}, "Invalid fill method. Expecting .* bad"),
            (
                {"value": Series([1, 2, 3, 4, "a"])},
                "Cannot setitem on a Categorical with a new category",
            ),
        ],
    )
    def test_fillna_raises(self, fillna_kwargs, msg):
        # https://github.com/pandas-dev/pandas/issues/19682
        # https://github.com/pandas-dev/pandas/issues/13628
        cat = Categorical([1, 2, 3, None, None])

        if len(fillna_kwargs) == 1 and "value" in fillna_kwargs:
            err = TypeError
        else:
            err = ValueError

        with pytest.raises(err, match=msg):
            cat.fillna(**fillna_kwargs)

    @pytest.mark.parametrize("named", [True, False])
    def test_fillna_iterable_category(self, named):
        # https://github.com/pandas-dev/pandas/issues/21097
        if named:
            Point = collections.namedtuple("Point", "x y")
        else:
            Point = lambda *args: args  # tuple
        cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
        result = cat.fillna(Point(0, 0))
        expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])

        tm.assert_categorical_equal(result, expected)

        # Case where the Point is not among our categories; we want ValueError,
        # not NotImplementedError GH#41914
        cat = Categorical(np.array([Point(1, 0), Point(0, 1), None], dtype=object))
        msg = "Cannot setitem on a Categorical with a new category"
        with pytest.raises(TypeError, match=msg):
            cat.fillna(Point(0, 0))

    def test_fillna_array(self):
        # accept Categorical or ndarray value if it holds appropriate values
        cat = Categorical(["A", "B", "C", None, None])

        other = cat.fillna("C")
        result = cat.fillna(other)
        tm.assert_categorical_equal(result, other)
        assert isna(cat[-1])  # didn't modify original inplace

        other = np.array(["A", "B", "C", "B", "A"])
        result = cat.fillna(other)
        expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
        tm.assert_categorical_equal(result, expected)
        assert isna(cat[-1])  # didn't modify original inplace

    @pytest.mark.parametrize(
        "values, expected",
        [
            ([1, 2, 3], np.array([False, False, False])),
            ([1, 2, np.nan], np.array([False, False, True])),
            ([1, 2, np.inf], np.array([False, False, True])),
            ([1, 2, pd.NA], np.array([False, False, True])),
        ],
    )
    def test_use_inf_as_na(self, values, expected):
        # https://github.com/pandas-dev/pandas/issues/33594
        msg = "use_inf_as_na option is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            with pd.option_context("mode.use_inf_as_na", True):
                cat = Categorical(values)
                result = cat.isna()
                tm.assert_numpy_array_equal(result, expected)

                result = Series(cat).isna()
                expected = Series(expected)
                tm.assert_series_equal(result, expected)

                result = DataFrame(cat).isna()
                expected = DataFrame(expected)
                tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "values, expected",
        [
            ([1, 2, 3], np.array([False, False, False])),
            ([1, 2, np.nan], np.array([False, False, True])),
            ([1, 2, np.inf], np.array([False, False, True])),
            ([1, 2, pd.NA], np.array([False, False, True])),
        ],
    )
    def test_use_inf_as_na_outside_context(self, values, expected):
        # https://github.com/pandas-dev/pandas/issues/33594
        # Using isna directly for Categorical will fail in general here
        cat = Categorical(values)

        msg = "use_inf_as_na option is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            with pd.option_context("mode.use_inf_as_na", True):
                result = isna(cat)
                tm.assert_numpy_array_equal(result, expected)

                result = isna(Series(cat))
                expected = Series(expected)
                tm.assert_series_equal(result, expected)

                result = isna(DataFrame(cat))
                expected = DataFrame(expected)
                tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "a1, a2, categories",
        [
            (["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]),
            ([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]),
        ],
    )
    def test_compare_categorical_with_missing(self, a1, a2, categories):
        # GH 28384
        cat_type = CategoricalDtype(categories)

        # !=
        result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type)
        expected = Series(a1) != Series(a2)
        tm.assert_series_equal(result, expected)

        # ==
        result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type)
        expected = Series(a1) == Series(a2)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "na_value, dtype",
        [
            (pd.NaT, "datetime64[ns]"),
            (None, "float64"),
            (np.nan, "float64"),
            (pd.NA, "float64"),
        ],
    )
    def test_categorical_only_missing_values_no_cast(self, na_value, dtype):
        # GH#44900
        result = Categorical([na_value, na_value])
        tm.assert_index_equal(result.categories, Index([], dtype=dtype))
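The fillna constraint tested above (fill values must already be categories) in a minimal sketch:

    import pandas as pd

    cat = pd.Categorical(["A", "B", None])
    print(cat.fillna("A"))  # ['A', 'B', 'A'], categories unchanged

    try:
        cat.fillna("Z")  # "Z" is not an existing category
    except TypeError as err:
        print(err)  # Cannot setitem on a Categorical with a new category ...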
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py
ADDED
@@ -0,0 +1,111 @@
import pytest

import pandas as pd
from pandas import Categorical
import pandas._testing as tm


@pytest.mark.parametrize(
    "to_replace,value,expected,flip_categories",
    [
        # one-to-one
        (1, 2, [2, 2, 3], False),
        (1, 4, [4, 2, 3], False),
        (4, 1, [1, 2, 3], False),
        (5, 6, [1, 2, 3], False),
        # many-to-one
        ([1], 2, [2, 2, 3], False),
        ([1, 2], 3, [3, 3, 3], False),
        ([1, 2], 4, [4, 4, 3], False),
        ((1, 2, 4), 5, [5, 5, 3], False),
        ((5, 6), 2, [1, 2, 3], False),
        ([1], [2], [2, 2, 3], False),
        ([1, 4], [5, 2], [5, 2, 3], False),
        # GH49404: overlap between to_replace and value
        ([1, 2, 3], [2, 3, 4], [2, 3, 4], False),
        # GH50872, GH46884: replace with null
        (1, None, [None, 2, 3], False),
        (1, pd.NA, [None, 2, 3], False),
        # check_categorical sorts categories, which crashes on mixed dtypes
        (3, "4", [1, 2, "4"], False),
        ([1, 2, "3"], "5", ["5", "5", 3], True),
    ],
)
@pytest.mark.filterwarnings(
    "ignore:.*with CategoricalDtype is deprecated:FutureWarning"
)
def test_replace_categorical_series(to_replace, value, expected, flip_categories):
    # GH 31720

    ser = pd.Series([1, 2, 3], dtype="category")
    result = ser.replace(to_replace, value)
    expected = pd.Series(expected, dtype="category")
    ser.replace(to_replace, value, inplace=True)

    if flip_categories:
        expected = expected.cat.set_categories(expected.cat.categories[::-1])

    tm.assert_series_equal(expected, result, check_category_order=False)
    tm.assert_series_equal(expected, ser, check_category_order=False)


@pytest.mark.parametrize(
    "to_replace, value, result, expected_error_msg",
    [
        ("b", "c", ["a", "c"], "Categorical.categories are different"),
        ("c", "d", ["a", "b"], None),
        # https://github.com/pandas-dev/pandas/issues/33288
        ("a", "a", ["a", "b"], None),
        ("b", None, ["a", None], "Categorical.categories length are different"),
    ],
)
def test_replace_categorical(to_replace, value, result, expected_error_msg):
    # GH#26988
    cat = Categorical(["a", "b"])
    expected = Categorical(result)
    msg = (
        r"The behavior of Series\.replace \(and DataFrame.replace\) "
        "with CategoricalDtype"
    )
    warn = FutureWarning if expected_error_msg is not None else None
    with tm.assert_produces_warning(warn, match=msg):
        result = pd.Series(cat, copy=False).replace(to_replace, value)._values

    tm.assert_categorical_equal(result, expected)
    if to_replace == "b":  # the "c" test is supposed to be unchanged
        with pytest.raises(AssertionError, match=expected_error_msg):
            # ensure non-inplace call does not affect original
            tm.assert_categorical_equal(cat, expected)

    ser = pd.Series(cat, copy=False)
    with tm.assert_produces_warning(warn, match=msg):
        ser.replace(to_replace, value, inplace=True)
    tm.assert_categorical_equal(cat, expected)


def test_replace_categorical_ea_dtype():
    # GH49404
    cat = Categorical(pd.array(["a", "b"], dtype="string"))
    msg = (
        r"The behavior of Series\.replace \(and DataFrame.replace\) "
        "with CategoricalDtype"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values
    expected = Categorical(pd.array(["c", pd.NA], dtype="string"))
    tm.assert_categorical_equal(result, expected)


def test_replace_maintain_ordering():
    # GH51016
    dtype = pd.CategoricalDtype([0, 1, 2], ordered=True)
    ser = pd.Series([0, 1, 2], dtype=dtype)
    msg = (
        r"The behavior of Series\.replace \(and DataFrame.replace\) "
        "with CategoricalDtype"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = ser.replace(0, 2)
    expected_dtype = pd.CategoricalDtype([1, 2], ordered=True)
    expected = pd.Series([2, 1, 2], dtype=expected_dtype)
    tm.assert_series_equal(expected, result, check_category_order=True)
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py
ADDED
@@ -0,0 +1,550 @@
import numpy as np
import pytest

from pandas._config import using_pyarrow_string_dtype

from pandas import (
    Categorical,
    CategoricalDtype,
    CategoricalIndex,
    Index,
    Series,
    date_range,
    option_context,
    period_range,
    timedelta_range,
)


class TestCategoricalReprWithFactor:
    def test_print(self, using_infer_string):
        factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
        if using_infer_string:
            expected = [
                "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
                "Categories (3, string): [a < b < c]",
            ]
        else:
            expected = [
                "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
                "Categories (3, object): ['a' < 'b' < 'c']",
            ]
        expected = "\n".join(expected)
        actual = repr(factor)
        assert actual == expected


class TestCategoricalRepr:
    def test_big_print(self):
        codes = np.array([0, 1, 2, 0, 1, 2] * 100)
        dtype = CategoricalDtype(categories=Index(["a", "b", "c"], dtype=object))
        factor = Categorical.from_codes(codes, dtype=dtype)
        expected = [
            "['a', 'b', 'c', 'a', 'b', ..., 'b', 'c', 'a', 'b', 'c']",
            "Length: 600",
            "Categories (3, object): ['a', 'b', 'c']",
        ]
        expected = "\n".join(expected)

        actual = repr(factor)

        assert actual == expected

    def test_empty_print(self):
        factor = Categorical([], Index(["a", "b", "c"], dtype=object))
        expected = "[], Categories (3, object): ['a', 'b', 'c']"
        actual = repr(factor)
        assert actual == expected

        assert expected == actual
        factor = Categorical([], Index(["a", "b", "c"], dtype=object), ordered=True)
        expected = "[], Categories (3, object): ['a' < 'b' < 'c']"
        actual = repr(factor)
        assert expected == actual

        factor = Categorical([], [])
        expected = "[], Categories (0, object): []"
        assert expected == repr(factor)

    def test_print_none_width(self):
        # GH10087
        a = Series(Categorical([1, 2, 3, 4]))
        exp = (
            "0 1\n1 2\n2 3\n3 4\n"
            "dtype: category\nCategories (4, int64): [1, 2, 3, 4]"
        )

        with option_context("display.width", None):
            assert exp == repr(a)

    @pytest.mark.skipif(
        using_pyarrow_string_dtype(),
        reason="Change once infer_string is set to True by default",
    )
    def test_unicode_print(self):
        c = Categorical(["aaaaa", "bb", "cccc"] * 20)
        expected = """\
['aaaaa', 'bb', 'cccc', 'aaaaa', 'bb', ..., 'bb', 'cccc', 'aaaaa', 'bb', 'cccc']
Length: 60
Categories (3, object): ['aaaaa', 'bb', 'cccc']"""

        assert repr(c) == expected

        c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
        expected = """\
['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう']
Length: 60
Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']"""  # noqa: E501

        assert repr(c) == expected

        # unicode option should not affect to Categorical, as it doesn't care
        # the repr width
        with option_context("display.unicode.east_asian_width", True):
            c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
            expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう']
Length: 60
Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']"""  # noqa: E501

            assert repr(c) == expected

    def test_categorical_repr(self):
        c = Categorical([1, 2, 3])
        exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""

        assert repr(c) == exp

        c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
        exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""

        assert repr(c) == exp

        c = Categorical([1, 2, 3, 4, 5] * 10)
        exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""

        assert repr(c) == exp

        c = Categorical(np.arange(20, dtype=np.int64))
        exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""

        assert repr(c) == exp

    def test_categorical_repr_ordered(self):
        c = Categorical([1, 2, 3], ordered=True)
        exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""

        assert repr(c) == exp

        c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
        exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""

        assert repr(c) == exp

        c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
        exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""

        assert repr(c) == exp

        c = Categorical(np.arange(20, dtype=np.int64), ordered=True)
        exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""

        assert repr(c) == exp

    def test_categorical_repr_datetime(self):
        idx = date_range("2011-01-01 09:00", freq="h", periods=5)
        c = Categorical(idx)

        exp = (
            "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
            "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
            "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
            " 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]"
            ""
        )
        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = (
            "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
            "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]\n"
            "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
            " 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]"
        )

        assert repr(c) == exp

        idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")
        c = Categorical(idx)
        exp = (
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
            "2011-01-01 13:00:00-05:00]\n"
            "Categories (5, datetime64[ns, US/Eastern]): "
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
            " "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
            " "
            "2011-01-01 13:00:00-05:00]"
        )

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = (
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
            "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
            "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
            "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
            "Categories (5, datetime64[ns, US/Eastern]): "
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
            " "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
            " "
            "2011-01-01 13:00:00-05:00]"
        )

        assert repr(c) == exp

    def test_categorical_repr_datetime_ordered(self):
        idx = date_range("2011-01-01 09:00", freq="h", periods=5)
        c = Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa: E501

        assert repr(c) == exp

        idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")
        c = Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""  # noqa: E501

        assert repr(c) == exp

    def test_categorical_repr_int_with_nan(self):
        c = Categorical([1, 2, np.nan])
        c_exp = """[1, 2, NaN]\nCategories (2, int64): [1, 2]"""
        assert repr(c) == c_exp

        s = Series([1, 2, np.nan], dtype="object").astype("category")
        s_exp = """0 1\n1 2\n2 NaN
dtype: category
Categories (2, int64): [1, 2]"""
        assert repr(s) == s_exp

    def test_categorical_repr_period(self):
        idx = period_range("2011-01-01 09:00", freq="h", periods=5)
        c = Categorical(idx)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""  # noqa: E501

        assert repr(c) == exp

        idx = period_range("2011-01", freq="M", periods=5)
        c = Categorical(idx)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""  # noqa: E501

        assert repr(c) == exp

    def test_categorical_repr_period_ordered(self):
        idx = period_range("2011-01-01 09:00", freq="h", periods=5)
        c = Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""  # noqa: E501

        assert repr(c) == exp

        idx = period_range("2011-01", freq="M", periods=5)
        c = Categorical(idx, ordered=True)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""  # noqa: E501

        assert repr(c) == exp

    def test_categorical_repr_timedelta(self):
        idx = timedelta_range("1 days", periods=5)
        c = Categorical(idx)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""  # noqa: E501

        assert repr(c) == exp

        idx = timedelta_range("1 hours", periods=20)
        c = Categorical(idx)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""  # noqa: E501

        assert repr(c) == exp

    def test_categorical_repr_timedelta_ordered(self):
        idx = timedelta_range("1 days", periods=5)
        c = Categorical(idx, ordered=True)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""  # noqa: E501

        assert repr(c) == exp

        idx = timedelta_range("1 hours", periods=20)
        c = Categorical(idx, ordered=True)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""  # noqa: E501

        assert repr(c) == exp

        c = Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""  # noqa: E501

        assert repr(c) == exp

    def test_categorical_index_repr(self):
        idx = CategoricalIndex(Categorical([1, 2, 3]))
        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(idx) == exp

        i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64)))
        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

    def test_categorical_index_repr_ordered(self):
        i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64), ordered=True))
        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

    def test_categorical_index_repr_datetime(self):
        idx = date_range("2011-01-01 09:00", freq="h", periods=5)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

    def test_categorical_index_repr_datetime_ordered(self):
        idx = date_range("2011-01-01 09:00", freq="h", periods=5)
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        i = CategoricalIndex(Categorical(idx.append(idx), ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

    def test_categorical_index_repr_period(self):
        # test all length
        idx = period_range("2011-01-01 09:00", freq="h", periods=1)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        idx = period_range("2011-01-01 09:00", freq="h", periods=2)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        idx = period_range("2011-01-01 09:00", freq="h", periods=3)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        idx = period_range("2011-01-01 09:00", freq="h", periods=5)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        i = CategoricalIndex(Categorical(idx.append(idx)))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        idx = period_range("2011-01", freq="M", periods=5)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

    def test_categorical_index_repr_period_ordered(self):
        idx = period_range("2011-01-01 09:00", freq="h", periods=5)
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

        idx = period_range("2011-01", freq="M", periods=5)
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

    def test_categorical_index_repr_timedelta(self):
        idx = timedelta_range("1 days", periods=5)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        idx = timedelta_range("1 hours", periods=10)
        i = CategoricalIndex(Categorical(idx))
        exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

    def test_categorical_index_repr_timedelta_ordered(self):
        idx = timedelta_range("1 days", periods=5)
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')"""  # noqa: E501
        assert repr(i) == exp

        idx = timedelta_range("1 hours", periods=10)
        i = CategoricalIndex(Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')"""  # noqa: E501

        assert repr(i) == exp

    def test_categorical_str_repr(self):
        # GH 33676
        result = repr(Categorical([1, "2", 3, 4]))
        expected = "[1, '2', 3, 4]\nCategories (4, object): [1, 3, 4, '2']"
        assert result == expected
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_array.py
ADDED
@@ -0,0 +1,478 @@
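The added module below exercises pd.array(): construction with an explicit dtype and dtype inference for numeric, datetime-like, period, interval, sparse, string, and boolean inputs. As a quick orientation, a minimal sketch of the behavior the tests pin down (assuming a recent pandas; the variable names are illustrative only, not part of the vendored file):

import pandas as pd

arr = pd.array([1, 2, None])              # inferred as a masked IntegerArray
print(arr.dtype)                          # Int64
dt = pd.array(["2000", "2001"], dtype="datetime64[ns]")
print(type(dt).__name__)                  # DatetimeArray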
import datetime
import decimal
import re

import numpy as np
import pytest
import pytz

import pandas as pd
import pandas._testing as tm
from pandas.api.extensions import register_extension_dtype
from pandas.arrays import (
    BooleanArray,
    DatetimeArray,
    FloatingArray,
    IntegerArray,
    IntervalArray,
    SparseArray,
    TimedeltaArray,
)
from pandas.core.arrays import (
    NumpyExtensionArray,
    period_array,
)
from pandas.tests.extension.decimal import (
    DecimalArray,
    DecimalDtype,
    to_decimal,
)


@pytest.mark.parametrize("dtype_unit", ["M8[h]", "M8[m]", "m8[h]", "M8[m]"])
def test_dt64_array(dtype_unit):
    # PR 53817
    dtype_var = np.dtype(dtype_unit)
    msg = (
        r"datetime64 and timedelta64 dtype resolutions other than "
        r"'s', 'ms', 'us', and 'ns' are deprecated. "
        r"In future releases passing unsupported resolutions will "
        r"raise an exception."
    )
    with tm.assert_produces_warning(FutureWarning, match=re.escape(msg)):
        pd.array([], dtype=dtype_var)


@pytest.mark.parametrize(
    "data, dtype, expected",
    [
        # Basic NumPy defaults.
        ([], None, FloatingArray._from_sequence([], dtype="Float64")),
        ([1, 2], None, IntegerArray._from_sequence([1, 2], dtype="Int64")),
        ([1, 2], object, NumpyExtensionArray(np.array([1, 2], dtype=object))),
        (
            [1, 2],
            np.dtype("float32"),
            NumpyExtensionArray(np.array([1.0, 2.0], dtype=np.dtype("float32"))),
        ),
        (
            np.array([], dtype=object),
            None,
            NumpyExtensionArray(np.array([], dtype=object)),
        ),
        (
            np.array([1, 2], dtype="int64"),
            None,
            IntegerArray._from_sequence([1, 2], dtype="Int64"),
        ),
        (
            np.array([1.0, 2.0], dtype="float64"),
            None,
            FloatingArray._from_sequence([1.0, 2.0], dtype="Float64"),
        ),
        # String alias passes through to NumPy
        ([1, 2], "float32", NumpyExtensionArray(np.array([1, 2], dtype="float32"))),
        ([1, 2], "int64", NumpyExtensionArray(np.array([1, 2], dtype=np.int64))),
        # GH#44715 FloatingArray does not support float16, so fall
        # back to NumpyExtensionArray
        (
            np.array([1, 2], dtype=np.float16),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.float16)),
        ),
        # idempotency with e.g. pd.array(pd.array([1, 2], dtype="int64"))
        (
            NumpyExtensionArray(np.array([1, 2], dtype=np.int32)),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.int32)),
        ),
        # Period alias
        (
            [pd.Period("2000", "D"), pd.Period("2001", "D")],
            "Period[D]",
            period_array(["2000", "2001"], freq="D"),
        ),
        # Period dtype
        (
            [pd.Period("2000", "D")],
            pd.PeriodDtype("D"),
            period_array(["2000"], freq="D"),
        ),
        # Datetime (naive)
        (
            [1, 2],
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]"
            ),
        ),
        (
            [1, 2],
            np.dtype("datetime64[s]"),
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[s]"), dtype="M8[s]"
            ),
        ),
        (
            np.array([1, 2], dtype="datetime64[ns]"),
            None,
            DatetimeArray._from_sequence(
                np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]"
            ),
        ),
        (
            pd.DatetimeIndex(["2000", "2001"]),
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            pd.DatetimeIndex(["2000", "2001"]),
            None,
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            ["2000", "2001"],
            np.dtype("datetime64[ns]"),
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        # Datetime (tz-aware)
        (
            ["2000", "2001"],
            pd.DatetimeTZDtype(tz="CET"),
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET")
            ),
        ),
        # Timedelta
        (
            ["1h", "2h"],
            np.dtype("timedelta64[ns]"),
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            pd.TimedeltaIndex(["1h", "2h"]),
            np.dtype("timedelta64[ns]"),
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            np.array([1, 2], dtype="m8[s]"),
            np.dtype("timedelta64[s]"),
            TimedeltaArray._from_sequence(
                np.array([1, 2], dtype="m8[s]"), dtype="m8[s]"
            ),
        ),
        (
            pd.TimedeltaIndex(["1h", "2h"]),
            None,
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            # preserve non-nano, i.e. don't cast to NumpyExtensionArray
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
            None,
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
        ),
        (
            # preserve non-nano, i.e. don't cast to NumpyExtensionArray
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
            np.dtype("m8[s]"),
            TimedeltaArray._simple_new(
                np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]")
            ),
        ),
        # Category
        (["a", "b"], "category", pd.Categorical(["a", "b"])),
        (
            ["a", "b"],
            pd.CategoricalDtype(None, ordered=True),
            pd.Categorical(["a", "b"], ordered=True),
        ),
        # Interval
        (
            [pd.Interval(1, 2), pd.Interval(3, 4)],
            "interval",
            IntervalArray.from_tuples([(1, 2), (3, 4)]),
        ),
        # Sparse
        ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")),
        # IntegerNA
        ([1, None], "Int16", pd.array([1, None], dtype="Int16")),
        (
            pd.Series([1, 2]),
            None,
            NumpyExtensionArray(np.array([1, 2], dtype=np.int64)),
        ),
        # String
        (
            ["a", None],
            "string",
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        (
            ["a", None],
            pd.StringDtype(),
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        # Boolean
        (
            [True, None],
            "boolean",
            BooleanArray._from_sequence([True, None], dtype="boolean"),
        ),
        (
            [True, None],
            pd.BooleanDtype(),
            BooleanArray._from_sequence([True, None], dtype="boolean"),
        ),
        # Index
        (pd.Index([1, 2]), None, NumpyExtensionArray(np.array([1, 2], dtype=np.int64))),
        # Series[EA] returns the EA
        (
            pd.Series(pd.Categorical(["a", "b"], categories=["a", "b", "c"])),
            None,
            pd.Categorical(["a", "b"], categories=["a", "b", "c"]),
        ),
        # "3rd party" EAs work
        ([decimal.Decimal(0), decimal.Decimal(1)], "decimal", to_decimal([0, 1])),
        # pass an ExtensionArray, but a different dtype
        (
            period_array(["2000", "2001"], freq="D"),
            "category",
            pd.Categorical([pd.Period("2000", "D"), pd.Period("2001", "D")]),
        ),
    ],
)
def test_array(data, dtype, expected):
    result = pd.array(data, dtype=dtype)
    tm.assert_equal(result, expected)


def test_array_copy():
    a = np.array([1, 2])
    # default is to copy
    b = pd.array(a, dtype=a.dtype)
    assert not tm.shares_memory(a, b)

    # copy=True
    b = pd.array(a, dtype=a.dtype, copy=True)
    assert not tm.shares_memory(a, b)

    # copy=False
    b = pd.array(a, dtype=a.dtype, copy=False)
    assert tm.shares_memory(a, b)


cet = pytz.timezone("CET")


@pytest.mark.parametrize(
    "data, expected",
    [
        # period
        (
            [pd.Period("2000", "D"), pd.Period("2001", "D")],
            period_array(["2000", "2001"], freq="D"),
        ),
        # interval
        ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])),
        # datetime
        (
            [pd.Timestamp("2000"), pd.Timestamp("2001")],
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            [datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)],
            DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"),
        ),
        (
            np.array([1, 2], dtype="M8[ns]"),
            DatetimeArray._from_sequence(np.array([1, 2], dtype="M8[ns]")),
        ),
        (
            np.array([1, 2], dtype="M8[us]"),
            DatetimeArray._simple_new(
                np.array([1, 2], dtype="M8[us]"), dtype=np.dtype("M8[us]")
            ),
        ),
        # datetimetz
        (
            [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2001", tz="CET")],
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET", unit="ns")
            ),
        ),
        (
            [
                datetime.datetime(2000, 1, 1, tzinfo=cet),
                datetime.datetime(2001, 1, 1, tzinfo=cet),
            ],
            DatetimeArray._from_sequence(
                ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet, unit="ns")
            ),
        ),
        # timedelta
        (
            [pd.Timedelta("1h"), pd.Timedelta("2h")],
            TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"),
        ),
        (
            np.array([1, 2], dtype="m8[ns]"),
            TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[ns]")),
        ),
        (
            np.array([1, 2], dtype="m8[us]"),
            TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[us]")),
        ),
        # integer
        ([1, 2], IntegerArray._from_sequence([1, 2], dtype="Int64")),
        ([1, None], IntegerArray._from_sequence([1, None], dtype="Int64")),
        ([1, pd.NA], IntegerArray._from_sequence([1, pd.NA], dtype="Int64")),
        ([1, np.nan], IntegerArray._from_sequence([1, np.nan], dtype="Int64")),
        # float
        ([0.1, 0.2], FloatingArray._from_sequence([0.1, 0.2], dtype="Float64")),
        ([0.1, None], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        ([0.1, np.nan], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        ([0.1, pd.NA], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")),
        # integer-like float
        ([1.0, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")),
        ([1.0, None], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        ([1.0, np.nan], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        ([1.0, pd.NA], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")),
        # mixed-integer-float
        ([1, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")),
        (
            [1, np.nan, 2.0],
            FloatingArray._from_sequence([1.0, None, 2.0], dtype="Float64"),
        ),
        # string
        (
            ["a", "b"],
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", "b"], dtype=pd.StringDtype()),
        ),
        (
            ["a", None],
            pd.StringDtype()
            .construct_array_type()
            ._from_sequence(["a", None], dtype=pd.StringDtype()),
        ),
        # Boolean
        ([True, False], BooleanArray._from_sequence([True, False], dtype="boolean")),
        ([True, None], BooleanArray._from_sequence([True, None], dtype="boolean")),
    ],
)
def test_array_inference(data, expected):
    result = pd.array(data)
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "data",
    [
        # mix of frequencies
        [pd.Period("2000", "D"), pd.Period("2001", "Y")],
        # mix of closed
        [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")],
        # Mix of timezones
        [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")],
        # Mix of tz-aware and tz-naive
        [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")],
        np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]),
    ],
)
def test_array_inference_fails(data):
    result = pd.array(data)
    expected = NumpyExtensionArray(np.array(data, dtype=object))
    tm.assert_extension_array_equal(result, expected)


@pytest.mark.parametrize("data", [np.array(0)])
def test_nd_raises(data):
    with pytest.raises(ValueError, match="NumpyExtensionArray must be 1-dimensional"):
        pd.array(data, dtype="int64")


def test_scalar_raises():
    with pytest.raises(ValueError, match="Cannot pass scalar '1'"):
        pd.array(1)


def test_dataframe_raises():
    # GH#51167 don't accidentally cast to StringArray by doing inference on columns
    df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    msg = "Cannot pass DataFrame to 'pandas.array'"
    with pytest.raises(TypeError, match=msg):
        pd.array(df)


def test_bounds_check():
    # GH21796
    with pytest.raises(
        TypeError, match=r"cannot safely cast non-equivalent int(32|64) to uint16"
    ):
        pd.array([-1, 2, 3], dtype="UInt16")


# ---------------------------------------------------------------------------
# A couple dummy classes to ensure that Series and Indexes are unboxed before
# getting to the EA classes.


@register_extension_dtype
class DecimalDtype2(DecimalDtype):
    name = "decimal2"

    @classmethod
    def construct_array_type(cls):
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return DecimalArray2


class DecimalArray2(DecimalArray):
    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
        if isinstance(scalars, (pd.Series, pd.Index)):
            raise TypeError("scalars should not be of type pd.Series or pd.Index")

        return super()._from_sequence(scalars, dtype=dtype, copy=copy)


def test_array_unboxes(index_or_series):
    box = index_or_series

    data = box([decimal.Decimal("1"), decimal.Decimal("2")])
    dtype = DecimalDtype2()
    # make sure it works
    with pytest.raises(
        TypeError, match="scalars should not be of type pd.Series or pd.Index"
    ):
        DecimalArray2._from_sequence(data, dtype=dtype)

    result = pd.array(data, dtype="decimal2")
    expected = DecimalArray2._from_sequence(data.values, dtype=dtype)
    tm.assert_equal(result, expected)


def test_array_to_numpy_na():
    # GH#40638
    arr = pd.array([pd.NA, 1], dtype="string[python]")
    result = arr.to_numpy(na_value=True, dtype=bool)
    expected = np.array([True, True])
    tm.assert_numpy_array_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py
ADDED
@@ -0,0 +1,1340 @@
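The added module below checks that DatetimeArray, TimedeltaArray, and PeriodArray mirror the behavior of their index counterparts (take, searchsorted, fill, comparisons). A minimal sketch of that array/index relationship, assuming a recent pandas; ._data is a private attribute used here purely for illustration:

import pandas as pd

dti = pd.date_range("2000-01-01", periods=3, freq="D")
arr = dti._data                           # the backing DatetimeArray
print(type(arr).__name__)                 # DatetimeArray
print(arr.searchsorted(dti[1]))           # 1, same answer as dti.searchsorted(dti[1])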
from __future__ import annotations

import re
import warnings

import numpy as np
import pytest

from pandas._libs import (
    NaT,
    OutOfBoundsDatetime,
    Timestamp,
)
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
from pandas.compat.numpy import np_version_gt2

import pandas as pd
from pandas import (
    DatetimeIndex,
    Period,
    PeriodIndex,
    TimedeltaIndex,
)
import pandas._testing as tm
from pandas.core.arrays import (
    DatetimeArray,
    NumpyExtensionArray,
    PeriodArray,
    TimedeltaArray,
)


# TODO: more freq variants
@pytest.fixture(params=["D", "B", "W", "ME", "QE", "YE"])
def freqstr(request):
    """Fixture returning parametrized frequency in string format."""
    return request.param


@pytest.fixture
def period_index(freqstr):
    """
    A fixture to provide PeriodIndex objects with different frequencies.

    Most PeriodArray behavior is already tested in PeriodIndex tests,
    so here we just test that the PeriodArray behavior matches
    the PeriodIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates
    with warnings.catch_warnings():
        # suppress deprecation of Period[B]
        warnings.filterwarnings(
            "ignore", message="Period with BDay freq", category=FutureWarning
        )
        freqstr = freq_to_period_freqstr(1, freqstr)
        pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return pi


@pytest.fixture
def datetime_index(freqstr):
    """
    A fixture to provide DatetimeIndex objects with different frequencies.

    Most DatetimeArray behavior is already tested in DatetimeIndex tests,
    so here we just test that the DatetimeArray behavior matches
    the DatetimeIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates, timezones
    dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return dti


@pytest.fixture
def timedelta_index():
    """
    A fixture to provide TimedeltaIndex objects with different frequencies.
    Most TimedeltaArray behavior is already tested in TimedeltaIndex tests,
    so here we just test that the TimedeltaArray behavior matches
    the TimedeltaIndex behavior.
    """
    # TODO: flesh this out
    return TimedeltaIndex(["1 Day", "3 Hours", "NaT"])


class SharedTests:
    index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex]

    @pytest.fixture
    def arr1d(self):
        """Fixture returning DatetimeArray with daily frequency."""
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, freq="D")
        else:
            arr = self.index_cls(data, freq="D")._data
        return arr

    def test_compare_len1_raises(self, arr1d):
        # make sure we raise when comparing with different lengths, specific
        # to the case where one has length-1, which numpy would broadcast
        arr = arr1d
        idx = self.index_cls(arr)

        with pytest.raises(ValueError, match="Lengths must match"):
            arr == arr[:1]

        # test the index classes while we're at it, GH#23078
        with pytest.raises(ValueError, match="Lengths must match"):
            idx <= idx[[0]]

    @pytest.mark.parametrize(
        "result",
        [
            pd.date_range("2020", periods=3),
            pd.date_range("2020", periods=3, tz="UTC"),
            pd.timedelta_range("0 days", periods=3),
            pd.period_range("2020Q1", periods=3, freq="Q"),
        ],
    )
    def test_compare_with_Categorical(self, result):
        expected = pd.Categorical(result)
        assert all(result == expected)
        assert not any(result != expected)

    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("as_index", [True, False])
    def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
        other = pd.Categorical(arr1d, ordered=ordered)
        if as_index:
            other = pd.CategoricalIndex(other)

        left, right = arr1d, other
        if reverse:
            left, right = right, left

        ones = np.ones(arr1d.shape, dtype=bool)
        zeros = ~ones

        result = left == right
        tm.assert_numpy_array_equal(result, ones)

        result = left != right
        tm.assert_numpy_array_equal(result, zeros)

        if not reverse and not as_index:
            # Otherwise Categorical raises TypeError bc it is not ordered
            # TODO: we should probably get the same behavior regardless?
            result = left < right
            tm.assert_numpy_array_equal(result, zeros)

            result = left <= right
            tm.assert_numpy_array_equal(result, ones)

            result = left > right
            tm.assert_numpy_array_equal(result, zeros)

            result = left >= right
            tm.assert_numpy_array_equal(result, ones)

    def test_take(self):
        data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9
        np.random.default_rng(2).shuffle(data)

        if self.array_cls is PeriodArray:
            arr = PeriodArray(data, dtype="period[D]")
        else:
            arr = self.index_cls(data)._data
        idx = self.index_cls._simple_new(arr)

        takers = [1, 4, 94]
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

        takers = np.array([1, 4, 94])
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

    @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time])
    def test_take_fill_raises(self, fill_value, arr1d):
        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr1d.take([0, 1], allow_fill=True, fill_value=fill_value)

    def test_take_fill(self, arr1d):
        arr = arr1d

        result = arr.take([-1, 1], allow_fill=True, fill_value=None)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=NaT)
        assert result[0] is NaT

    @pytest.mark.filterwarnings(
        "ignore:Period with BDay freq is deprecated:FutureWarning"
    )
    def test_take_fill_str(self, arr1d):
        # Cast str fill_value matching other fill_value-taking methods
        result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1]))
        expected = arr1d[[-1, 1]]
        tm.assert_equal(result, expected)

        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr1d.take([-1, 1], allow_fill=True, fill_value="foo")

    def test_concat_same_type(self, arr1d):
        arr = arr1d
        idx = self.index_cls(arr)
        idx = idx.insert(0, NaT)
        arr = arr1d

        result = arr._concat_same_type([arr[:-1], arr[1:], arr])
        arr2 = arr.astype(object)
        expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]))

        tm.assert_index_equal(self.index_cls(result), expected)

    def test_unbox_scalar(self, arr1d):
        result = arr1d._unbox_scalar(arr1d[0])
        expected = arr1d._ndarray.dtype.type
        assert isinstance(result, expected)

        result = arr1d._unbox_scalar(NaT)
        assert isinstance(result, expected)

        msg = f"'value' should be a {self.scalar_type.__name__}."
        with pytest.raises(ValueError, match=msg):
            arr1d._unbox_scalar("foo")

    def test_check_compatible_with(self, arr1d):
        arr1d._check_compatible_with(arr1d[0])
        arr1d._check_compatible_with(arr1d[:1])
        arr1d._check_compatible_with(NaT)

    def test_scalar_from_string(self, arr1d):
        result = arr1d._scalar_from_string(str(arr1d[0]))
        assert result == arr1d[0]

    def test_reduce_invalid(self, arr1d):
        msg = "does not support reduction 'not a method'"
        with pytest.raises(TypeError, match=msg):
            arr1d._reduce("not a method")

    @pytest.mark.parametrize("method", ["pad", "backfill"])
    def test_fillna_method_doesnt_change_orig(self, method):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.array_cls._from_sequence(data)
        arr[4] = NaT

        fill_value = arr[3] if method == "pad" else arr[5]

        result = arr._pad_or_backfill(method=method)
        assert result[4] == fill_value

        # check that the original was not changed
        assert arr[4] is NaT

    def test_searchsorted(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        if self.array_cls is PeriodArray:
            arr = self.array_cls(data, dtype="period[D]")
        else:
            arr = self.array_cls._from_sequence(data)

        # scalar
        result = arr.searchsorted(arr[1])
        assert result == 1

        result = arr.searchsorted(arr[2], side="right")
        assert result == 3

        # own-type
        result = arr.searchsorted(arr[1:3])
        expected = np.array([1, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        result = arr.searchsorted(arr[1:3], side="right")
        expected = np.array([2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        # GH#29884 match numpy convention on whether NaT goes
        # at the end or the beginning
        result = arr.searchsorted(NaT)
        assert result == 10

    @pytest.mark.parametrize("box", [None, "index", "series"])
    def test_searchsorted_castable_strings(self, arr1d, box, string_storage):
        arr = arr1d
        if box is None:
            pass
        elif box == "index":
303 |
+
# Test the equivalent Index.searchsorted method while we're here
|
304 |
+
arr = self.index_cls(arr)
|
305 |
+
else:
|
306 |
+
# Test the equivalent Series.searchsorted method while we're here
|
307 |
+
arr = pd.Series(arr)
|
308 |
+
|
309 |
+
# scalar
|
310 |
+
result = arr.searchsorted(str(arr[1]))
|
311 |
+
assert result == 1
|
312 |
+
|
313 |
+
result = arr.searchsorted(str(arr[2]), side="right")
|
314 |
+
assert result == 3
|
315 |
+
|
316 |
+
result = arr.searchsorted([str(x) for x in arr[1:3]])
|
317 |
+
expected = np.array([1, 2], dtype=np.intp)
|
318 |
+
tm.assert_numpy_array_equal(result, expected)
|
319 |
+
|
320 |
+
with pytest.raises(
|
321 |
+
TypeError,
|
322 |
+
match=re.escape(
|
323 |
+
f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
|
324 |
+
"or array of those. Got 'str' instead."
|
325 |
+
),
|
326 |
+
):
|
327 |
+
arr.searchsorted("foo")
|
328 |
+
|
329 |
+
with pd.option_context("string_storage", string_storage):
|
330 |
+
with pytest.raises(
|
331 |
+
TypeError,
|
332 |
+
match=re.escape(
|
333 |
+
f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
|
334 |
+
"or array of those. Got string array instead."
|
335 |
+
),
|
336 |
+
):
|
337 |
+
arr.searchsorted([str(arr[1]), "baz"])
|
338 |
+
|
339 |
+
def test_getitem_near_implementation_bounds(self):
|
340 |
+
# We only check tz-naive for DTA because the bounds are slightly different
|
341 |
+
# for other tzs
|
342 |
+
i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8")
|
343 |
+
if self.array_cls is PeriodArray:
|
344 |
+
arr = self.array_cls(i8vals, dtype="period[ns]")
|
345 |
+
else:
|
346 |
+
arr = self.index_cls(i8vals, freq="ns")._data
|
347 |
+
arr[0] # should not raise OutOfBoundsDatetime
|
348 |
+
|
349 |
+
index = pd.Index(arr)
|
350 |
+
index[0] # should not raise OutOfBoundsDatetime
|
351 |
+
|
352 |
+
ser = pd.Series(arr)
|
353 |
+
ser[0] # should not raise OutOfBoundsDatetime
|
354 |
+
|
355 |
+
def test_getitem_2d(self, arr1d):
|
356 |
+
# 2d slicing on a 1D array
|
357 |
+
expected = type(arr1d)._simple_new(
|
358 |
+
arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype
|
359 |
+
)
|
360 |
+
result = arr1d[:, np.newaxis]
|
361 |
+
tm.assert_equal(result, expected)
|
362 |
+
|
363 |
+
# Lookup on a 2D array
|
364 |
+
arr2d = expected
|
365 |
+
expected = type(arr2d)._simple_new(arr2d._ndarray[:3, 0], dtype=arr2d.dtype)
|
366 |
+
result = arr2d[:3, 0]
|
367 |
+
tm.assert_equal(result, expected)
|
368 |
+
|
369 |
+
# Scalar lookup
|
370 |
+
result = arr2d[-1, 0]
|
371 |
+
expected = arr1d[-1]
|
372 |
+
assert result == expected
|
373 |
+
|
374 |
+
def test_iter_2d(self, arr1d):
|
375 |
+
data2d = arr1d._ndarray[:3, np.newaxis]
|
376 |
+
arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
|
377 |
+
result = list(arr2d)
|
378 |
+
assert len(result) == 3
|
379 |
+
for x in result:
|
380 |
+
assert isinstance(x, type(arr1d))
|
381 |
+
assert x.ndim == 1
|
382 |
+
assert x.dtype == arr1d.dtype
|
383 |
+
|
384 |
+
def test_repr_2d(self, arr1d):
|
385 |
+
data2d = arr1d._ndarray[:3, np.newaxis]
|
386 |
+
arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
|
387 |
+
|
388 |
+
result = repr(arr2d)
|
389 |
+
|
390 |
+
if isinstance(arr2d, TimedeltaArray):
|
391 |
+
expected = (
|
392 |
+
f"<{type(arr2d).__name__}>\n"
|
393 |
+
"[\n"
|
394 |
+
f"['{arr1d[0]._repr_base()}'],\n"
|
395 |
+
f"['{arr1d[1]._repr_base()}'],\n"
|
396 |
+
f"['{arr1d[2]._repr_base()}']\n"
|
397 |
+
"]\n"
|
398 |
+
f"Shape: (3, 1), dtype: {arr1d.dtype}"
|
399 |
+
)
|
400 |
+
else:
|
401 |
+
expected = (
|
402 |
+
f"<{type(arr2d).__name__}>\n"
|
403 |
+
"[\n"
|
404 |
+
f"['{arr1d[0]}'],\n"
|
405 |
+
f"['{arr1d[1]}'],\n"
|
406 |
+
f"['{arr1d[2]}']\n"
|
407 |
+
"]\n"
|
408 |
+
f"Shape: (3, 1), dtype: {arr1d.dtype}"
|
409 |
+
)
|
410 |
+
|
411 |
+
assert result == expected
|
412 |
+
|
413 |
+
def test_setitem(self):
|
414 |
+
data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
|
415 |
+
if self.array_cls is PeriodArray:
|
416 |
+
arr = self.array_cls(data, dtype="period[D]")
|
417 |
+
else:
|
418 |
+
arr = self.index_cls(data, freq="D")._data
|
419 |
+
|
420 |
+
arr[0] = arr[1]
|
421 |
+
expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
|
422 |
+
expected[0] = expected[1]
|
423 |
+
|
424 |
+
tm.assert_numpy_array_equal(arr.asi8, expected)
|
425 |
+
|
426 |
+
arr[:2] = arr[-2:]
|
427 |
+
expected[:2] = expected[-2:]
|
428 |
+
tm.assert_numpy_array_equal(arr.asi8, expected)
|
429 |
+
|
430 |
+
@pytest.mark.parametrize(
|
431 |
+
"box",
|
432 |
+
[
|
433 |
+
pd.Index,
|
434 |
+
pd.Series,
|
435 |
+
np.array,
|
436 |
+
list,
|
437 |
+
NumpyExtensionArray,
|
438 |
+
],
|
439 |
+
)
|
440 |
+
def test_setitem_object_dtype(self, box, arr1d):
|
441 |
+
expected = arr1d.copy()[::-1]
|
442 |
+
if expected.dtype.kind in ["m", "M"]:
|
443 |
+
expected = expected._with_freq(None)
|
444 |
+
|
445 |
+
vals = expected
|
446 |
+
if box is list:
|
447 |
+
vals = list(vals)
|
448 |
+
elif box is np.array:
|
449 |
+
# if we do np.array(x).astype(object) then dt64 and td64 cast to ints
|
450 |
+
vals = np.array(vals.astype(object))
|
451 |
+
elif box is NumpyExtensionArray:
|
452 |
+
vals = box(np.asarray(vals, dtype=object))
|
453 |
+
else:
|
454 |
+
vals = box(vals).astype(object)
|
455 |
+
|
456 |
+
arr1d[:] = vals
|
457 |
+
|
458 |
+
tm.assert_equal(arr1d, expected)
|
459 |
+
|
460 |
+
def test_setitem_strs(self, arr1d):
|
461 |
+
# Check that we parse strs in both scalar and listlike
|
462 |
+
|
463 |
+
# Setting list-like of strs
|
464 |
+
expected = arr1d.copy()
|
465 |
+
expected[[0, 1]] = arr1d[-2:]
|
466 |
+
|
467 |
+
result = arr1d.copy()
|
468 |
+
result[:2] = [str(x) for x in arr1d[-2:]]
|
469 |
+
tm.assert_equal(result, expected)
|
470 |
+
|
471 |
+
# Same thing but now for just a scalar str
|
472 |
+
expected = arr1d.copy()
|
473 |
+
expected[0] = arr1d[-1]
|
474 |
+
|
475 |
+
result = arr1d.copy()
|
476 |
+
result[0] = str(arr1d[-1])
|
477 |
+
tm.assert_equal(result, expected)
|
478 |
+
|
479 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
480 |
+
def test_setitem_categorical(self, arr1d, as_index):
|
481 |
+
expected = arr1d.copy()[::-1]
|
482 |
+
if not isinstance(expected, PeriodArray):
|
483 |
+
expected = expected._with_freq(None)
|
484 |
+
|
485 |
+
cat = pd.Categorical(arr1d)
|
486 |
+
if as_index:
|
487 |
+
cat = pd.CategoricalIndex(cat)
|
488 |
+
|
489 |
+
arr1d[:] = cat[::-1]
|
490 |
+
|
491 |
+
tm.assert_equal(arr1d, expected)
|
492 |
+
|
493 |
+
def test_setitem_raises(self, arr1d):
|
494 |
+
arr = arr1d[:10]
|
495 |
+
val = arr[0]
|
496 |
+
|
497 |
+
with pytest.raises(IndexError, match="index 12 is out of bounds"):
|
498 |
+
arr[12] = val
|
499 |
+
|
500 |
+
with pytest.raises(TypeError, match="value should be a.* 'object'"):
|
501 |
+
arr[0] = object()
|
502 |
+
|
503 |
+
msg = "cannot set using a list-like indexer with a different length"
|
504 |
+
with pytest.raises(ValueError, match=msg):
|
505 |
+
# GH#36339
|
506 |
+
arr[[]] = [arr[1]]
|
507 |
+
|
508 |
+
msg = "cannot set using a slice indexer with a different length than"
|
509 |
+
with pytest.raises(ValueError, match=msg):
|
510 |
+
# GH#36339
|
511 |
+
arr[1:1] = arr[:3]
|
512 |
+
|
513 |
+
@pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
|
514 |
+
def test_setitem_numeric_raises(self, arr1d, box):
|
515 |
+
# We don't cast e.g. int64 to our own dtype for setitem
|
516 |
+
|
517 |
+
msg = (
|
518 |
+
f"value should be a '{arr1d._scalar_type.__name__}', "
|
519 |
+
"'NaT', or array of those. Got"
|
520 |
+
)
|
521 |
+
with pytest.raises(TypeError, match=msg):
|
522 |
+
arr1d[:2] = box([0, 1])
|
523 |
+
|
524 |
+
with pytest.raises(TypeError, match=msg):
|
525 |
+
arr1d[:2] = box([0.0, 1.0])
|
526 |
+
|
527 |
+
def test_inplace_arithmetic(self):
|
528 |
+
# GH#24115 check that iadd and isub are actually in-place
|
529 |
+
data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
|
530 |
+
if self.array_cls is PeriodArray:
|
531 |
+
arr = self.array_cls(data, dtype="period[D]")
|
532 |
+
else:
|
533 |
+
arr = self.index_cls(data, freq="D")._data
|
534 |
+
|
535 |
+
expected = arr + pd.Timedelta(days=1)
|
536 |
+
arr += pd.Timedelta(days=1)
|
537 |
+
tm.assert_equal(arr, expected)
|
538 |
+
|
539 |
+
expected = arr - pd.Timedelta(days=1)
|
540 |
+
arr -= pd.Timedelta(days=1)
|
541 |
+
tm.assert_equal(arr, expected)
|
542 |
+
|
543 |
+
def test_shift_fill_int_deprecated(self, arr1d):
|
544 |
+
# GH#31971, enforced in 2.0
|
545 |
+
with pytest.raises(TypeError, match="value should be a"):
|
546 |
+
arr1d.shift(1, fill_value=1)
|
547 |
+
|
548 |
+
def test_median(self, arr1d):
|
549 |
+
arr = arr1d
|
550 |
+
if len(arr) % 2 == 0:
|
551 |
+
# make it easier to define `expected`
|
552 |
+
arr = arr[:-1]
|
553 |
+
|
554 |
+
expected = arr[len(arr) // 2]
|
555 |
+
|
556 |
+
result = arr.median()
|
557 |
+
assert type(result) is type(expected)
|
558 |
+
assert result == expected
|
559 |
+
|
560 |
+
arr[len(arr) // 2] = NaT
|
561 |
+
if not isinstance(expected, Period):
|
562 |
+
expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean()
|
563 |
+
|
564 |
+
assert arr.median(skipna=False) is NaT
|
565 |
+
|
566 |
+
result = arr.median()
|
567 |
+
assert type(result) is type(expected)
|
568 |
+
assert result == expected
|
569 |
+
|
570 |
+
assert arr[:0].median() is NaT
|
571 |
+
assert arr[:0].median(skipna=False) is NaT
|
572 |
+
|
573 |
+
# 2d Case
|
574 |
+
arr2 = arr.reshape(-1, 1)
|
575 |
+
|
576 |
+
result = arr2.median(axis=None)
|
577 |
+
assert type(result) is type(expected)
|
578 |
+
assert result == expected
|
579 |
+
|
580 |
+
assert arr2.median(axis=None, skipna=False) is NaT
|
581 |
+
|
582 |
+
result = arr2.median(axis=0)
|
583 |
+
expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype)
|
584 |
+
tm.assert_equal(result, expected2)
|
585 |
+
|
586 |
+
result = arr2.median(axis=0, skipna=False)
|
587 |
+
expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype)
|
588 |
+
tm.assert_equal(result, expected2)
|
589 |
+
|
590 |
+
result = arr2.median(axis=1)
|
591 |
+
tm.assert_equal(result, arr)
|
592 |
+
|
593 |
+
result = arr2.median(axis=1, skipna=False)
|
594 |
+
tm.assert_equal(result, arr)
|
595 |
+
|
596 |
+
def test_from_integer_array(self):
|
597 |
+
arr = np.array([1, 2, 3], dtype=np.int64)
|
598 |
+
data = pd.array(arr, dtype="Int64")
|
599 |
+
if self.array_cls is PeriodArray:
|
600 |
+
expected = self.array_cls(arr, dtype=self.example_dtype)
|
601 |
+
result = self.array_cls(data, dtype=self.example_dtype)
|
602 |
+
else:
|
603 |
+
expected = self.array_cls._from_sequence(arr, dtype=self.example_dtype)
|
604 |
+
result = self.array_cls._from_sequence(data, dtype=self.example_dtype)
|
605 |
+
|
606 |
+
tm.assert_extension_array_equal(result, expected)
|
607 |
+
|
608 |
+
|
609 |
+
class TestDatetimeArray(SharedTests):
|
610 |
+
index_cls = DatetimeIndex
|
611 |
+
array_cls = DatetimeArray
|
612 |
+
scalar_type = Timestamp
|
613 |
+
example_dtype = "M8[ns]"
|
614 |
+
|
615 |
+
@pytest.fixture
|
616 |
+
def arr1d(self, tz_naive_fixture, freqstr):
|
617 |
+
"""
|
618 |
+
Fixture returning DatetimeArray with parametrized frequency and
|
619 |
+
timezones
|
620 |
+
"""
|
621 |
+
tz = tz_naive_fixture
|
622 |
+
dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz)
|
623 |
+
dta = dti._data
|
624 |
+
return dta
|
625 |
+
|
626 |
+
def test_round(self, arr1d):
|
627 |
+
# GH#24064
|
628 |
+
dti = self.index_cls(arr1d)
|
629 |
+
|
630 |
+
result = dti.round(freq="2min")
|
631 |
+
expected = dti - pd.Timedelta(minutes=1)
|
632 |
+
expected = expected._with_freq(None)
|
633 |
+
tm.assert_index_equal(result, expected)
|
634 |
+
|
635 |
+
dta = dti._data
|
636 |
+
result = dta.round(freq="2min")
|
637 |
+
expected = expected._data._with_freq(None)
|
638 |
+
tm.assert_datetime_array_equal(result, expected)
|
639 |
+
|
640 |
+
def test_array_interface(self, datetime_index):
|
641 |
+
arr = datetime_index._data
|
642 |
+
copy_false = None if np_version_gt2 else False
|
643 |
+
|
644 |
+
# default asarray gives the same underlying data (for tz naive)
|
645 |
+
result = np.asarray(arr)
|
646 |
+
expected = arr._ndarray
|
647 |
+
assert result is expected
|
648 |
+
tm.assert_numpy_array_equal(result, expected)
|
649 |
+
result = np.array(arr, copy=copy_false)
|
650 |
+
assert result is expected
|
651 |
+
tm.assert_numpy_array_equal(result, expected)
|
652 |
+
|
653 |
+
# specifying M8[ns] gives the same result as default
|
654 |
+
result = np.asarray(arr, dtype="datetime64[ns]")
|
655 |
+
expected = arr._ndarray
|
656 |
+
assert result is expected
|
657 |
+
tm.assert_numpy_array_equal(result, expected)
|
658 |
+
result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
|
659 |
+
assert result is expected
|
660 |
+
tm.assert_numpy_array_equal(result, expected)
|
661 |
+
result = np.array(arr, dtype="datetime64[ns]")
|
662 |
+
assert result is not expected
|
663 |
+
tm.assert_numpy_array_equal(result, expected)
|
664 |
+
|
665 |
+
# to object dtype
|
666 |
+
result = np.asarray(arr, dtype=object)
|
667 |
+
expected = np.array(list(arr), dtype=object)
|
668 |
+
tm.assert_numpy_array_equal(result, expected)
|
669 |
+
|
670 |
+
# to other dtype always copies
|
671 |
+
result = np.asarray(arr, dtype="int64")
|
672 |
+
assert result is not arr.asi8
|
673 |
+
assert not np.may_share_memory(arr, result)
|
674 |
+
expected = arr.asi8.copy()
|
675 |
+
tm.assert_numpy_array_equal(result, expected)
|
676 |
+
|
677 |
+
# other dtypes handled by numpy
|
678 |
+
for dtype in ["float64", str]:
|
679 |
+
result = np.asarray(arr, dtype=dtype)
|
680 |
+
expected = np.asarray(arr).astype(dtype)
|
681 |
+
tm.assert_numpy_array_equal(result, expected)
|
682 |
+
|
683 |
+
def test_array_object_dtype(self, arr1d):
|
684 |
+
# GH#23524
|
685 |
+
arr = arr1d
|
686 |
+
dti = self.index_cls(arr1d)
|
687 |
+
|
688 |
+
expected = np.array(list(dti))
|
689 |
+
|
690 |
+
result = np.array(arr, dtype=object)
|
691 |
+
tm.assert_numpy_array_equal(result, expected)
|
692 |
+
|
693 |
+
# also test the DatetimeIndex method while we're at it
|
694 |
+
result = np.array(dti, dtype=object)
|
695 |
+
tm.assert_numpy_array_equal(result, expected)
|
696 |
+
|
697 |
+
def test_array_tz(self, arr1d):
|
698 |
+
# GH#23524
|
699 |
+
arr = arr1d
|
700 |
+
dti = self.index_cls(arr1d)
|
701 |
+
copy_false = None if np_version_gt2 else False
|
702 |
+
|
703 |
+
expected = dti.asi8.view("M8[ns]")
|
704 |
+
result = np.array(arr, dtype="M8[ns]")
|
705 |
+
tm.assert_numpy_array_equal(result, expected)
|
706 |
+
|
707 |
+
result = np.array(arr, dtype="datetime64[ns]")
|
708 |
+
tm.assert_numpy_array_equal(result, expected)
|
709 |
+
|
710 |
+
# check that we are not making copies when setting copy=copy_false
|
711 |
+
result = np.array(arr, dtype="M8[ns]", copy=copy_false)
|
712 |
+
assert result.base is expected.base
|
713 |
+
assert result.base is not None
|
714 |
+
result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
|
715 |
+
assert result.base is expected.base
|
716 |
+
assert result.base is not None
|
717 |
+
|
718 |
+
def test_array_i8_dtype(self, arr1d):
|
719 |
+
arr = arr1d
|
720 |
+
dti = self.index_cls(arr1d)
|
721 |
+
copy_false = None if np_version_gt2 else False
|
722 |
+
|
723 |
+
expected = dti.asi8
|
724 |
+
result = np.array(arr, dtype="i8")
|
725 |
+
tm.assert_numpy_array_equal(result, expected)
|
726 |
+
|
727 |
+
result = np.array(arr, dtype=np.int64)
|
728 |
+
tm.assert_numpy_array_equal(result, expected)
|
729 |
+
|
730 |
+
# check that we are still making copies when setting copy=copy_false
|
731 |
+
result = np.array(arr, dtype="i8", copy=copy_false)
|
732 |
+
assert result.base is not expected.base
|
733 |
+
assert result.base is None
|
734 |
+
|
735 |
+
def test_from_array_keeps_base(self):
|
736 |
+
# Ensure that DatetimeArray._ndarray.base isn't lost.
|
737 |
+
arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
|
738 |
+
dta = DatetimeArray._from_sequence(arr)
|
739 |
+
|
740 |
+
assert dta._ndarray is arr
|
741 |
+
dta = DatetimeArray._from_sequence(arr[:0])
|
742 |
+
assert dta._ndarray.base is arr
|
743 |
+
|
744 |
+
def test_from_dti(self, arr1d):
|
745 |
+
arr = arr1d
|
746 |
+
dti = self.index_cls(arr1d)
|
747 |
+
assert list(dti) == list(arr)
|
748 |
+
|
749 |
+
# Check that Index.__new__ knows what to do with DatetimeArray
|
750 |
+
dti2 = pd.Index(arr)
|
751 |
+
assert isinstance(dti2, DatetimeIndex)
|
752 |
+
assert list(dti2) == list(arr)
|
753 |
+
|
754 |
+
def test_astype_object(self, arr1d):
|
755 |
+
arr = arr1d
|
756 |
+
dti = self.index_cls(arr1d)
|
757 |
+
|
758 |
+
asobj = arr.astype("O")
|
759 |
+
assert isinstance(asobj, np.ndarray)
|
760 |
+
assert asobj.dtype == "O"
|
761 |
+
assert list(asobj) == list(dti)
|
762 |
+
|
763 |
+
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
|
764 |
+
def test_to_period(self, datetime_index, freqstr):
|
765 |
+
dti = datetime_index
|
766 |
+
arr = dti._data
|
767 |
+
|
768 |
+
freqstr = freq_to_period_freqstr(1, freqstr)
|
769 |
+
expected = dti.to_period(freq=freqstr)
|
770 |
+
result = arr.to_period(freq=freqstr)
|
771 |
+
assert isinstance(result, PeriodArray)
|
772 |
+
|
773 |
+
tm.assert_equal(result, expected._data)
|
774 |
+
|
775 |
+
def test_to_period_2d(self, arr1d):
|
776 |
+
arr2d = arr1d.reshape(1, -1)
|
777 |
+
|
778 |
+
warn = None if arr1d.tz is None else UserWarning
|
779 |
+
with tm.assert_produces_warning(warn):
|
780 |
+
result = arr2d.to_period("D")
|
781 |
+
expected = arr1d.to_period("D").reshape(1, -1)
|
782 |
+
tm.assert_period_array_equal(result, expected)
|
783 |
+
|
784 |
+
@pytest.mark.parametrize("propname", DatetimeArray._bool_ops)
|
785 |
+
def test_bool_properties(self, arr1d, propname):
|
786 |
+
# in this case _bool_ops is just `is_leap_year`
|
787 |
+
dti = self.index_cls(arr1d)
|
788 |
+
arr = arr1d
|
789 |
+
assert dti.freq == arr.freq
|
790 |
+
|
791 |
+
result = getattr(arr, propname)
|
792 |
+
expected = np.array(getattr(dti, propname), dtype=result.dtype)
|
793 |
+
|
794 |
+
tm.assert_numpy_array_equal(result, expected)
|
795 |
+
|
796 |
+
@pytest.mark.parametrize("propname", DatetimeArray._field_ops)
|
797 |
+
def test_int_properties(self, arr1d, propname):
|
798 |
+
dti = self.index_cls(arr1d)
|
799 |
+
arr = arr1d
|
800 |
+
|
801 |
+
result = getattr(arr, propname)
|
802 |
+
expected = np.array(getattr(dti, propname), dtype=result.dtype)
|
803 |
+
|
804 |
+
tm.assert_numpy_array_equal(result, expected)
|
805 |
+
|
806 |
+
def test_take_fill_valid(self, arr1d, fixed_now_ts):
|
807 |
+
arr = arr1d
|
808 |
+
dti = self.index_cls(arr1d)
|
809 |
+
|
810 |
+
now = fixed_now_ts.tz_localize(dti.tz)
|
811 |
+
result = arr.take([-1, 1], allow_fill=True, fill_value=now)
|
812 |
+
assert result[0] == now
|
813 |
+
|
814 |
+
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
|
815 |
+
with pytest.raises(TypeError, match=msg):
|
816 |
+
# fill_value Timedelta invalid
|
817 |
+
arr.take([-1, 1], allow_fill=True, fill_value=now - now)
|
818 |
+
|
819 |
+
with pytest.raises(TypeError, match=msg):
|
820 |
+
# fill_value Period invalid
|
821 |
+
arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1"))
|
822 |
+
|
823 |
+
tz = None if dti.tz is not None else "US/Eastern"
|
824 |
+
now = fixed_now_ts.tz_localize(tz)
|
825 |
+
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
|
826 |
+
with pytest.raises(TypeError, match=msg):
|
827 |
+
# Timestamp with mismatched tz-awareness
|
828 |
+
arr.take([-1, 1], allow_fill=True, fill_value=now)
|
829 |
+
|
830 |
+
value = NaT._value
|
831 |
+
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
|
832 |
+
with pytest.raises(TypeError, match=msg):
|
833 |
+
# require NaT, not iNaT, as it could be confused with an integer
|
834 |
+
arr.take([-1, 1], allow_fill=True, fill_value=value)
|
835 |
+
|
836 |
+
value = np.timedelta64("NaT", "ns")
|
837 |
+
with pytest.raises(TypeError, match=msg):
|
838 |
+
# require appropriate-dtype if we have a NA value
|
839 |
+
arr.take([-1, 1], allow_fill=True, fill_value=value)
|
840 |
+
|
841 |
+
if arr.tz is not None:
|
842 |
+
# GH#37356
|
843 |
+
# Assuming here that arr1d fixture does not include Australia/Melbourne
|
844 |
+
value = fixed_now_ts.tz_localize("Australia/Melbourne")
|
845 |
+
result = arr.take([-1, 1], allow_fill=True, fill_value=value)
|
846 |
+
|
847 |
+
expected = arr.take(
|
848 |
+
[-1, 1],
|
849 |
+
allow_fill=True,
|
850 |
+
fill_value=value.tz_convert(arr.dtype.tz),
|
851 |
+
)
|
852 |
+
tm.assert_equal(result, expected)
|
853 |
+
|
854 |
+
def test_concat_same_type_invalid(self, arr1d):
|
855 |
+
# different timezones
|
856 |
+
arr = arr1d
|
857 |
+
|
858 |
+
if arr.tz is None:
|
859 |
+
other = arr.tz_localize("UTC")
|
860 |
+
else:
|
861 |
+
other = arr.tz_localize(None)
|
862 |
+
|
863 |
+
with pytest.raises(ValueError, match="to_concat must have the same"):
|
864 |
+
arr._concat_same_type([arr, other])
|
865 |
+
|
866 |
+
def test_concat_same_type_different_freq(self, unit):
|
867 |
+
# we *can* concatenate DTI with different freqs.
|
868 |
+
a = pd.date_range("2000", periods=2, freq="D", tz="US/Central", unit=unit)._data
|
869 |
+
b = pd.date_range("2000", periods=2, freq="h", tz="US/Central", unit=unit)._data
|
870 |
+
result = DatetimeArray._concat_same_type([a, b])
|
871 |
+
expected = (
|
872 |
+
pd.to_datetime(
|
873 |
+
[
|
874 |
+
"2000-01-01 00:00:00",
|
875 |
+
"2000-01-02 00:00:00",
|
876 |
+
"2000-01-01 00:00:00",
|
877 |
+
"2000-01-01 01:00:00",
|
878 |
+
]
|
879 |
+
)
|
880 |
+
.tz_localize("US/Central")
|
881 |
+
.as_unit(unit)
|
882 |
+
._data
|
883 |
+
)
|
884 |
+
|
885 |
+
tm.assert_datetime_array_equal(result, expected)
|
886 |
+
|
887 |
+
def test_strftime(self, arr1d):
|
888 |
+
arr = arr1d
|
889 |
+
|
890 |
+
result = arr.strftime("%Y %b")
|
891 |
+
expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object)
|
892 |
+
tm.assert_numpy_array_equal(result, expected)
|
893 |
+
|
894 |
+
def test_strftime_nat(self):
|
895 |
+
# GH 29578
|
896 |
+
arr = DatetimeIndex(["2019-01-01", NaT])._data
|
897 |
+
|
898 |
+
result = arr.strftime("%Y-%m-%d")
|
899 |
+
expected = np.array(["2019-01-01", np.nan], dtype=object)
|
900 |
+
tm.assert_numpy_array_equal(result, expected)
|
901 |
+
|
902 |
+
|
903 |
+
class TestTimedeltaArray(SharedTests):
|
904 |
+
index_cls = TimedeltaIndex
|
905 |
+
array_cls = TimedeltaArray
|
906 |
+
scalar_type = pd.Timedelta
|
907 |
+
example_dtype = "m8[ns]"
|
908 |
+
|
909 |
+
def test_from_tdi(self):
|
910 |
+
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
|
911 |
+
arr = tdi._data
|
912 |
+
assert list(arr) == list(tdi)
|
913 |
+
|
914 |
+
# Check that Index.__new__ knows what to do with TimedeltaArray
|
915 |
+
tdi2 = pd.Index(arr)
|
916 |
+
assert isinstance(tdi2, TimedeltaIndex)
|
917 |
+
assert list(tdi2) == list(arr)
|
918 |
+
|
919 |
+
def test_astype_object(self):
|
920 |
+
tdi = TimedeltaIndex(["1 Day", "3 Hours"])
|
921 |
+
arr = tdi._data
|
922 |
+
asobj = arr.astype("O")
|
923 |
+
assert isinstance(asobj, np.ndarray)
|
924 |
+
assert asobj.dtype == "O"
|
925 |
+
assert list(asobj) == list(tdi)
|
926 |
+
|
927 |
+
def test_to_pytimedelta(self, timedelta_index):
|
928 |
+
tdi = timedelta_index
|
929 |
+
arr = tdi._data
|
930 |
+
|
931 |
+
expected = tdi.to_pytimedelta()
|
932 |
+
result = arr.to_pytimedelta()
|
933 |
+
|
934 |
+
tm.assert_numpy_array_equal(result, expected)
|
935 |
+
|
936 |
+
def test_total_seconds(self, timedelta_index):
|
937 |
+
tdi = timedelta_index
|
938 |
+
arr = tdi._data
|
939 |
+
|
940 |
+
expected = tdi.total_seconds()
|
941 |
+
result = arr.total_seconds()
|
942 |
+
|
943 |
+
tm.assert_numpy_array_equal(result, expected.values)
|
944 |
+
|
945 |
+
@pytest.mark.parametrize("propname", TimedeltaArray._field_ops)
|
946 |
+
def test_int_properties(self, timedelta_index, propname):
|
947 |
+
tdi = timedelta_index
|
948 |
+
arr = tdi._data
|
949 |
+
|
950 |
+
result = getattr(arr, propname)
|
951 |
+
expected = np.array(getattr(tdi, propname), dtype=result.dtype)
|
952 |
+
|
953 |
+
tm.assert_numpy_array_equal(result, expected)
|
954 |
+
|
955 |
+
def test_array_interface(self, timedelta_index):
|
956 |
+
arr = timedelta_index._data
|
957 |
+
copy_false = None if np_version_gt2 else False
|
958 |
+
|
959 |
+
# default asarray gives the same underlying data
|
960 |
+
result = np.asarray(arr)
|
961 |
+
expected = arr._ndarray
|
962 |
+
assert result is expected
|
963 |
+
tm.assert_numpy_array_equal(result, expected)
|
964 |
+
result = np.array(arr, copy=copy_false)
|
965 |
+
assert result is expected
|
966 |
+
tm.assert_numpy_array_equal(result, expected)
|
967 |
+
|
968 |
+
# specifying m8[ns] gives the same result as default
|
969 |
+
result = np.asarray(arr, dtype="timedelta64[ns]")
|
970 |
+
expected = arr._ndarray
|
971 |
+
assert result is expected
|
972 |
+
tm.assert_numpy_array_equal(result, expected)
|
973 |
+
result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false)
|
974 |
+
assert result is expected
|
975 |
+
tm.assert_numpy_array_equal(result, expected)
|
976 |
+
result = np.array(arr, dtype="timedelta64[ns]")
|
977 |
+
assert result is not expected
|
978 |
+
tm.assert_numpy_array_equal(result, expected)
|
979 |
+
|
980 |
+
# to object dtype
|
981 |
+
result = np.asarray(arr, dtype=object)
|
982 |
+
expected = np.array(list(arr), dtype=object)
|
983 |
+
tm.assert_numpy_array_equal(result, expected)
|
984 |
+
|
985 |
+
# to other dtype always copies
|
986 |
+
result = np.asarray(arr, dtype="int64")
|
987 |
+
assert result is not arr.asi8
|
988 |
+
assert not np.may_share_memory(arr, result)
|
989 |
+
expected = arr.asi8.copy()
|
990 |
+
tm.assert_numpy_array_equal(result, expected)
|
991 |
+
|
992 |
+
# other dtypes handled by numpy
|
993 |
+
for dtype in ["float64", str]:
|
994 |
+
result = np.asarray(arr, dtype=dtype)
|
995 |
+
expected = np.asarray(arr).astype(dtype)
|
996 |
+
tm.assert_numpy_array_equal(result, expected)
|
997 |
+
|
998 |
+
def test_take_fill_valid(self, timedelta_index, fixed_now_ts):
|
999 |
+
tdi = timedelta_index
|
1000 |
+
arr = tdi._data
|
1001 |
+
|
1002 |
+
td1 = pd.Timedelta(days=1)
|
1003 |
+
result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
|
1004 |
+
assert result[0] == td1
|
1005 |
+
|
1006 |
+
value = fixed_now_ts
|
1007 |
+
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
|
1008 |
+
with pytest.raises(TypeError, match=msg):
|
1009 |
+
# fill_value Timestamp invalid
|
1010 |
+
arr.take([0, 1], allow_fill=True, fill_value=value)
|
1011 |
+
|
1012 |
+
value = fixed_now_ts.to_period("D")
|
1013 |
+
with pytest.raises(TypeError, match=msg):
|
1014 |
+
# fill_value Period invalid
|
1015 |
+
arr.take([0, 1], allow_fill=True, fill_value=value)
|
1016 |
+
|
1017 |
+
value = np.datetime64("NaT", "ns")
|
1018 |
+
with pytest.raises(TypeError, match=msg):
|
1019 |
+
# require appropriate-dtype if we have a NA value
|
1020 |
+
arr.take([-1, 1], allow_fill=True, fill_value=value)
|
1021 |
+
|
1022 |
+
|
1023 |
+
@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")
|
1024 |
+
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
|
1025 |
+
class TestPeriodArray(SharedTests):
|
1026 |
+
index_cls = PeriodIndex
|
1027 |
+
array_cls = PeriodArray
|
1028 |
+
scalar_type = Period
|
1029 |
+
example_dtype = PeriodIndex([], freq="W").dtype
|
1030 |
+
|
1031 |
+
@pytest.fixture
|
1032 |
+
def arr1d(self, period_index):
|
1033 |
+
"""
|
1034 |
+
Fixture returning PeriodArray from parametrized PeriodIndex objects
|
1035 |
+
"""
|
1036 |
+
return period_index._data
|
1037 |
+
|
1038 |
+
def test_from_pi(self, arr1d):
|
1039 |
+
pi = self.index_cls(arr1d)
|
1040 |
+
arr = arr1d
|
1041 |
+
assert list(arr) == list(pi)
|
1042 |
+
|
1043 |
+
# Check that Index.__new__ knows what to do with PeriodArray
|
1044 |
+
pi2 = pd.Index(arr)
|
1045 |
+
assert isinstance(pi2, PeriodIndex)
|
1046 |
+
assert list(pi2) == list(arr)
|
1047 |
+
|
1048 |
+
def test_astype_object(self, arr1d):
|
1049 |
+
pi = self.index_cls(arr1d)
|
1050 |
+
arr = arr1d
|
1051 |
+
asobj = arr.astype("O")
|
1052 |
+
assert isinstance(asobj, np.ndarray)
|
1053 |
+
assert asobj.dtype == "O"
|
1054 |
+
assert list(asobj) == list(pi)
|
1055 |
+
|
1056 |
+
def test_take_fill_valid(self, arr1d):
|
1057 |
+
arr = arr1d
|
1058 |
+
|
1059 |
+
value = NaT._value
|
1060 |
+
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
|
1061 |
+
with pytest.raises(TypeError, match=msg):
|
1062 |
+
# require NaT, not iNaT, as it could be confused with an integer
|
1063 |
+
arr.take([-1, 1], allow_fill=True, fill_value=value)
|
1064 |
+
|
1065 |
+
value = np.timedelta64("NaT", "ns")
|
1066 |
+
with pytest.raises(TypeError, match=msg):
|
1067 |
+
# require appropriate-dtype if we have a NA value
|
1068 |
+
arr.take([-1, 1], allow_fill=True, fill_value=value)
|
1069 |
+
|
1070 |
+
@pytest.mark.parametrize("how", ["S", "E"])
|
1071 |
+
def test_to_timestamp(self, how, arr1d):
|
1072 |
+
pi = self.index_cls(arr1d)
|
1073 |
+
arr = arr1d
|
1074 |
+
|
1075 |
+
expected = DatetimeIndex(pi.to_timestamp(how=how))._data
|
1076 |
+
result = arr.to_timestamp(how=how)
|
1077 |
+
assert isinstance(result, DatetimeArray)
|
1078 |
+
|
1079 |
+
tm.assert_equal(result, expected)
|
1080 |
+
|
1081 |
+
def test_to_timestamp_roundtrip_bday(self):
|
1082 |
+
# Case where infer_freq inside would choose "D" instead of "B"
|
1083 |
+
dta = pd.date_range("2021-10-18", periods=3, freq="B")._data
|
1084 |
+
parr = dta.to_period()
|
1085 |
+
result = parr.to_timestamp()
|
1086 |
+
assert result.freq == "B"
|
1087 |
+
tm.assert_extension_array_equal(result, dta)
|
1088 |
+
|
1089 |
+
dta2 = dta[::2]
|
1090 |
+
parr2 = dta2.to_period()
|
1091 |
+
result2 = parr2.to_timestamp()
|
1092 |
+
assert result2.freq == "2B"
|
1093 |
+
tm.assert_extension_array_equal(result2, dta2)
|
1094 |
+
|
1095 |
+
parr3 = dta.to_period("2B")
|
1096 |
+
result3 = parr3.to_timestamp()
|
1097 |
+
assert result3.freq == "B"
|
1098 |
+
tm.assert_extension_array_equal(result3, dta)
|
1099 |
+
|
1100 |
+
def test_to_timestamp_out_of_bounds(self):
|
1101 |
+
# GH#19643 previously overflowed silently
|
1102 |
+
pi = pd.period_range("1500", freq="Y", periods=3)
|
1103 |
+
msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"
|
1104 |
+
with pytest.raises(OutOfBoundsDatetime, match=msg):
|
1105 |
+
pi.to_timestamp()
|
1106 |
+
|
1107 |
+
with pytest.raises(OutOfBoundsDatetime, match=msg):
|
1108 |
+
pi._data.to_timestamp()
|
1109 |
+
|
1110 |
+
@pytest.mark.parametrize("propname", PeriodArray._bool_ops)
|
1111 |
+
def test_bool_properties(self, arr1d, propname):
|
1112 |
+
# in this case _bool_ops is just `is_leap_year`
|
1113 |
+
pi = self.index_cls(arr1d)
|
1114 |
+
arr = arr1d
|
1115 |
+
|
1116 |
+
result = getattr(arr, propname)
|
1117 |
+
expected = np.array(getattr(pi, propname))
|
1118 |
+
|
1119 |
+
tm.assert_numpy_array_equal(result, expected)
|
1120 |
+
|
1121 |
+
@pytest.mark.parametrize("propname", PeriodArray._field_ops)
|
1122 |
+
def test_int_properties(self, arr1d, propname):
|
1123 |
+
pi = self.index_cls(arr1d)
|
1124 |
+
arr = arr1d
|
1125 |
+
|
1126 |
+
result = getattr(arr, propname)
|
1127 |
+
expected = np.array(getattr(pi, propname))
|
1128 |
+
|
1129 |
+
tm.assert_numpy_array_equal(result, expected)
|
1130 |
+
|
1131 |
+
def test_array_interface(self, arr1d):
|
1132 |
+
arr = arr1d
|
1133 |
+
|
1134 |
+
# default asarray gives objects
|
1135 |
+
result = np.asarray(arr)
|
1136 |
+
expected = np.array(list(arr), dtype=object)
|
1137 |
+
tm.assert_numpy_array_equal(result, expected)
|
1138 |
+
|
1139 |
+
# to object dtype (same as default)
|
1140 |
+
result = np.asarray(arr, dtype=object)
|
1141 |
+
tm.assert_numpy_array_equal(result, expected)
|
1142 |
+
|
1143 |
+
result = np.asarray(arr, dtype="int64")
|
1144 |
+
tm.assert_numpy_array_equal(result, arr.asi8)
|
1145 |
+
|
1146 |
+
# to other dtypes
|
1147 |
+
msg = r"float\(\) argument must be a string or a( real)? number, not 'Period'"
|
1148 |
+
with pytest.raises(TypeError, match=msg):
|
1149 |
+
np.asarray(arr, dtype="float64")
|
1150 |
+
|
1151 |
+
result = np.asarray(arr, dtype="S20")
|
1152 |
+
expected = np.asarray(arr).astype("S20")
|
1153 |
+
tm.assert_numpy_array_equal(result, expected)
|
1154 |
+
|
1155 |
+
def test_strftime(self, arr1d):
|
1156 |
+
arr = arr1d
|
1157 |
+
|
1158 |
+
result = arr.strftime("%Y")
|
1159 |
+
expected = np.array([per.strftime("%Y") for per in arr], dtype=object)
|
1160 |
+
tm.assert_numpy_array_equal(result, expected)
|
1161 |
+
|
1162 |
+
def test_strftime_nat(self):
|
1163 |
+
# GH 29578
|
1164 |
+
arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]"))
|
1165 |
+
|
1166 |
+
result = arr.strftime("%Y-%m-%d")
|
1167 |
+
expected = np.array(["2019-01-01", np.nan], dtype=object)
|
1168 |
+
tm.assert_numpy_array_equal(result, expected)
|
1169 |
+
|
1170 |
+
|
1171 |
+
@pytest.mark.parametrize(
|
1172 |
+
"arr,casting_nats",
|
1173 |
+
[
|
1174 |
+
(
|
1175 |
+
TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
|
1176 |
+
(NaT, np.timedelta64("NaT", "ns")),
|
1177 |
+
),
|
1178 |
+
(
|
1179 |
+
pd.date_range("2000-01-01", periods=3, freq="D")._data,
|
1180 |
+
(NaT, np.datetime64("NaT", "ns")),
|
1181 |
+
),
|
1182 |
+
(pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)),
|
1183 |
+
],
|
1184 |
+
ids=lambda x: type(x).__name__,
|
1185 |
+
)
|
1186 |
+
def test_casting_nat_setitem_array(arr, casting_nats):
|
1187 |
+
expected = type(arr)._from_sequence([NaT, arr[1], arr[2]], dtype=arr.dtype)
|
1188 |
+
|
1189 |
+
for nat in casting_nats:
|
1190 |
+
arr = arr.copy()
|
1191 |
+
arr[0] = nat
|
1192 |
+
tm.assert_equal(arr, expected)
|
1193 |
+
|
1194 |
+
|
1195 |
+
@pytest.mark.parametrize(
|
1196 |
+
"arr,non_casting_nats",
|
1197 |
+
[
|
1198 |
+
(
|
1199 |
+
TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
|
1200 |
+
(np.datetime64("NaT", "ns"), NaT._value),
|
1201 |
+
),
|
1202 |
+
(
|
1203 |
+
pd.date_range("2000-01-01", periods=3, freq="D")._data,
|
1204 |
+
(np.timedelta64("NaT", "ns"), NaT._value),
|
1205 |
+
),
|
1206 |
+
(
|
1207 |
+
pd.period_range("2000-01-01", periods=3, freq="D")._data,
|
1208 |
+
(np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value),
|
1209 |
+
),
|
1210 |
+
],
|
1211 |
+
ids=lambda x: type(x).__name__,
|
1212 |
+
)
|
1213 |
+
def test_invalid_nat_setitem_array(arr, non_casting_nats):
|
1214 |
+
msg = (
|
1215 |
+
"value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. "
|
1216 |
+
"Got '(timedelta64|datetime64|int)' instead."
|
1217 |
+
)
|
1218 |
+
|
1219 |
+
for nat in non_casting_nats:
|
1220 |
+
with pytest.raises(TypeError, match=msg):
|
1221 |
+
arr[0] = nat
|
1222 |
+
|
1223 |
+
|
1224 |
+
@pytest.mark.parametrize(
|
1225 |
+
"arr",
|
1226 |
+
[
|
1227 |
+
pd.date_range("2000", periods=4).array,
|
1228 |
+
pd.timedelta_range("2000", periods=4).array,
|
1229 |
+
],
|
1230 |
+
)
|
1231 |
+
def test_to_numpy_extra(arr):
|
1232 |
+
arr[0] = NaT
|
1233 |
+
original = arr.copy()
|
1234 |
+
|
1235 |
+
result = arr.to_numpy()
|
1236 |
+
assert np.isnan(result[0])
|
1237 |
+
|
1238 |
+
result = arr.to_numpy(dtype="int64")
|
1239 |
+
assert result[0] == -9223372036854775808
|
1240 |
+
|
1241 |
+
result = arr.to_numpy(dtype="int64", na_value=0)
|
1242 |
+
assert result[0] == 0
|
1243 |
+
|
1244 |
+
result = arr.to_numpy(na_value=arr[1].to_numpy())
|
1245 |
+
assert result[0] == result[1]
|
1246 |
+
|
1247 |
+
result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False))
|
1248 |
+
assert result[0] == result[1]
|
1249 |
+
|
1250 |
+
tm.assert_equal(arr, original)
|
1251 |
+
|
1252 |
+
|
1253 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
1254 |
+
@pytest.mark.parametrize(
|
1255 |
+
"values",
|
1256 |
+
[
|
1257 |
+
pd.to_datetime(["2020-01-01", "2020-02-01"]),
|
1258 |
+
pd.to_timedelta([1, 2], unit="D"),
|
1259 |
+
PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
|
1260 |
+
],
|
1261 |
+
)
|
1262 |
+
@pytest.mark.parametrize(
|
1263 |
+
"klass",
|
1264 |
+
[
|
1265 |
+
list,
|
1266 |
+
np.array,
|
1267 |
+
pd.array,
|
1268 |
+
pd.Series,
|
1269 |
+
pd.Index,
|
1270 |
+
pd.Categorical,
|
1271 |
+
pd.CategoricalIndex,
|
1272 |
+
],
|
1273 |
+
)
|
1274 |
+
def test_searchsorted_datetimelike_with_listlike(values, klass, as_index):
|
1275 |
+
# https://github.com/pandas-dev/pandas/issues/32762
|
1276 |
+
if not as_index:
|
1277 |
+
values = values._data
|
1278 |
+
|
1279 |
+
result = values.searchsorted(klass(values))
|
1280 |
+
expected = np.array([0, 1], dtype=result.dtype)
|
1281 |
+
|
1282 |
+
tm.assert_numpy_array_equal(result, expected)
|
1283 |
+
|
1284 |
+
|
1285 |
+
@pytest.mark.parametrize(
|
1286 |
+
"values",
|
1287 |
+
[
|
1288 |
+
pd.to_datetime(["2020-01-01", "2020-02-01"]),
|
1289 |
+
pd.to_timedelta([1, 2], unit="D"),
|
1290 |
+
PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
|
1291 |
+
],
|
1292 |
+
)
|
1293 |
+
@pytest.mark.parametrize(
|
1294 |
+
"arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
|
1295 |
+
)
|
1296 |
+
def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):
|
1297 |
+
# https://github.com/pandas-dev/pandas/issues/32762
|
1298 |
+
msg = "[Unexpected type|Cannot compare]"
|
1299 |
+
with pytest.raises(TypeError, match=msg):
|
1300 |
+
values.searchsorted(arg)
|
1301 |
+
|
1302 |
+
|
1303 |
+
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
|
1304 |
+
def test_period_index_construction_from_strings(klass):
|
1305 |
+
# https://github.com/pandas-dev/pandas/issues/26109
|
1306 |
+
strings = ["2020Q1", "2020Q2"] * 2
|
1307 |
+
data = klass(strings)
|
1308 |
+
result = PeriodIndex(data, freq="Q")
|
1309 |
+
expected = PeriodIndex([Period(s) for s in strings])
|
1310 |
+
tm.assert_index_equal(result, expected)
|
1311 |
+
|
1312 |
+
|
1313 |
+
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
|
1314 |
+
def test_from_pandas_array(dtype):
|
1315 |
+
# GH#24615
|
1316 |
+
data = np.array([1, 2, 3], dtype=dtype)
|
1317 |
+
arr = NumpyExtensionArray(data)
|
1318 |
+
|
1319 |
+
cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype]
|
1320 |
+
|
1321 |
+
depr_msg = f"{cls.__name__}.__init__ is deprecated"
|
1322 |
+
with tm.assert_produces_warning(FutureWarning, match=depr_msg):
|
1323 |
+
result = cls(arr)
|
1324 |
+
expected = cls(data)
|
1325 |
+
tm.assert_extension_array_equal(result, expected)
|
1326 |
+
|
1327 |
+
result = cls._from_sequence(arr, dtype=dtype)
|
1328 |
+
expected = cls._from_sequence(data, dtype=dtype)
|
1329 |
+
tm.assert_extension_array_equal(result, expected)
|
1330 |
+
|
1331 |
+
func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]
|
1332 |
+
result = func(arr).array
|
1333 |
+
expected = func(data).array
|
1334 |
+
tm.assert_equal(result, expected)
|
1335 |
+
|
1336 |
+
# Let's check the Indexes while we're here
|
1337 |
+
idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]
|
1338 |
+
result = idx_cls(arr)
|
1339 |
+
expected = idx_cls(data)
|
1340 |
+
tm.assert_index_equal(result, expected)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimes.py
ADDED
@@ -0,0 +1,840 @@
1 |
+
"""
|
2 |
+
Tests for DatetimeArray
|
3 |
+
"""
|
4 |
+
from __future__ import annotations
|
5 |
+
|
6 |
+
from datetime import timedelta
|
7 |
+
import operator
|
8 |
+
|
9 |
+
try:
|
10 |
+
from zoneinfo import ZoneInfo
|
11 |
+
except ImportError:
|
12 |
+
# Cannot assign to a type
|
13 |
+
ZoneInfo = None # type: ignore[misc, assignment]
|
14 |
+
|
15 |
+
import numpy as np
|
16 |
+
import pytest
|
17 |
+
|
18 |
+
from pandas._libs.tslibs import tz_compare
|
19 |
+
|
20 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
21 |
+
|
22 |
+
import pandas as pd
|
23 |
+
import pandas._testing as tm
|
24 |
+
from pandas.core.arrays import (
|
25 |
+
DatetimeArray,
|
26 |
+
TimedeltaArray,
|
27 |
+
)
|
28 |
+
|
29 |
+
|
30 |
+
class TestNonNano:
|
31 |
+
@pytest.fixture(params=["s", "ms", "us"])
|
32 |
+
def unit(self, request):
|
33 |
+
"""Fixture returning parametrized time units"""
|
34 |
+
return request.param
|
35 |
+
|
36 |
+
@pytest.fixture
|
37 |
+
def dtype(self, unit, tz_naive_fixture):
|
38 |
+
tz = tz_naive_fixture
|
39 |
+
if tz is None:
|
40 |
+
return np.dtype(f"datetime64[{unit}]")
|
41 |
+
else:
|
42 |
+
return DatetimeTZDtype(unit=unit, tz=tz)
|
43 |
+
|
44 |
+
@pytest.fixture
|
45 |
+
def dta_dti(self, unit, dtype):
|
46 |
+
tz = getattr(dtype, "tz", None)
|
47 |
+
|
48 |
+
dti = pd.date_range("2016-01-01", periods=55, freq="D", tz=tz)
|
49 |
+
if tz is None:
|
50 |
+
arr = np.asarray(dti).astype(f"M8[{unit}]")
|
51 |
+
else:
|
52 |
+
arr = np.asarray(dti.tz_convert("UTC").tz_localize(None)).astype(
|
53 |
+
f"M8[{unit}]"
|
54 |
+
)
|
55 |
+
|
56 |
+
dta = DatetimeArray._simple_new(arr, dtype=dtype)
|
57 |
+
return dta, dti
|
58 |
+
|
59 |
+
@pytest.fixture
|
60 |
+
def dta(self, dta_dti):
|
61 |
+
dta, dti = dta_dti
|
62 |
+
return dta
|
63 |
+
|
64 |
+
def test_non_nano(self, unit, dtype):
|
65 |
+
arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
|
66 |
+
dta = DatetimeArray._simple_new(arr, dtype=dtype)
|
67 |
+
|
68 |
+
assert dta.dtype == dtype
|
69 |
+
assert dta[0].unit == unit
|
70 |
+
assert tz_compare(dta.tz, dta[0].tz)
|
71 |
+
assert (dta[0] == dta[:1]).all()
|
72 |
+
|
73 |
+
@pytest.mark.parametrize(
|
74 |
+
"field", DatetimeArray._field_ops + DatetimeArray._bool_ops
|
75 |
+
)
|
76 |
+
def test_fields(self, unit, field, dtype, dta_dti):
|
77 |
+
dta, dti = dta_dti
|
78 |
+
|
79 |
+
assert (dti == dta).all()
|
80 |
+
|
81 |
+
res = getattr(dta, field)
|
82 |
+
expected = getattr(dti._data, field)
|
83 |
+
tm.assert_numpy_array_equal(res, expected)
|
84 |
+
|
85 |
+
def test_normalize(self, unit):
|
86 |
+
dti = pd.date_range("2016-01-01 06:00:00", periods=55, freq="D")
|
87 |
+
arr = np.asarray(dti).astype(f"M8[{unit}]")
|
88 |
+
|
89 |
+
dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
|
90 |
+
|
91 |
+
assert not dta.is_normalized
|
92 |
+
|
93 |
+
# TODO: simplify once we can just .astype to other unit
|
94 |
+
exp = np.asarray(dti.normalize()).astype(f"M8[{unit}]")
|
95 |
+
expected = DatetimeArray._simple_new(exp, dtype=exp.dtype)
|
96 |
+
|
97 |
+
res = dta.normalize()
|
98 |
+
tm.assert_extension_array_equal(res, expected)
|
99 |
+
|
100 |
+
def test_simple_new_requires_match(self, unit):
|
101 |
+
arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
|
102 |
+
dtype = DatetimeTZDtype(unit, "UTC")
|
103 |
+
|
104 |
+
dta = DatetimeArray._simple_new(arr, dtype=dtype)
|
105 |
+
assert dta.dtype == dtype
|
106 |
+
|
107 |
+
wrong = DatetimeTZDtype("ns", "UTC")
|
108 |
+
with pytest.raises(AssertionError, match=""):
|
109 |
+
DatetimeArray._simple_new(arr, dtype=wrong)
|
110 |
+
|
111 |
+
def test_std_non_nano(self, unit):
|
112 |
+
dti = pd.date_range("2016-01-01", periods=55, freq="D")
|
113 |
+
arr = np.asarray(dti).astype(f"M8[{unit}]")
|
114 |
+
|
115 |
+
dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
|
116 |
+
|
117 |
+
# we should match the nano-reso std, but floored to our reso.
|
118 |
+
res = dta.std()
|
119 |
+
assert res._creso == dta._creso
|
120 |
+
assert res == dti.std().floor(unit)
|
121 |
+
|
122 |
+
@pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning")
|
123 |
+
def test_to_period(self, dta_dti):
|
124 |
+
dta, dti = dta_dti
|
125 |
+
result = dta.to_period("D")
|
126 |
+
expected = dti._data.to_period("D")
|
127 |
+
|
128 |
+
tm.assert_extension_array_equal(result, expected)
|
129 |
+
|
130 |
+
def test_iter(self, dta):
|
131 |
+
res = next(iter(dta))
|
132 |
+
expected = dta[0]
|
133 |
+
|
134 |
+
assert type(res) is pd.Timestamp
|
135 |
+
assert res._value == expected._value
|
136 |
+
assert res._creso == expected._creso
|
137 |
+
assert res == expected
|
138 |
+
|
139 |
+
def test_astype_object(self, dta):
|
140 |
+
result = dta.astype(object)
|
141 |
+
assert all(x._creso == dta._creso for x in result)
|
142 |
+
assert all(x == y for x, y in zip(result, dta))
|
143 |
+
|
144 |
+
def test_to_pydatetime(self, dta_dti):
|
145 |
+
dta, dti = dta_dti
|
146 |
+
|
147 |
+
result = dta.to_pydatetime()
|
148 |
+
expected = dti.to_pydatetime()
|
149 |
+
tm.assert_numpy_array_equal(result, expected)
|
150 |
+
|
151 |
+
@pytest.mark.parametrize("meth", ["time", "timetz", "date"])
|
152 |
+
def test_time_date(self, dta_dti, meth):
|
153 |
+
dta, dti = dta_dti
|
154 |
+
|
155 |
+
result = getattr(dta, meth)
|
156 |
+
expected = getattr(dti, meth)
|
157 |
+
tm.assert_numpy_array_equal(result, expected)
|
158 |
+
|
159 |
+
def test_format_native_types(self, unit, dtype, dta_dti):
|
160 |
+
# In this case we should get the same formatted values with our nano
|
161 |
+
# version dti._data as we do with the non-nano dta
|
162 |
+
dta, dti = dta_dti
|
163 |
+
|
164 |
+
res = dta._format_native_types()
|
165 |
+
exp = dti._data._format_native_types()
|
166 |
+
tm.assert_numpy_array_equal(res, exp)
|
167 |
+
|
168 |
+
def test_repr(self, dta_dti, unit):
|
169 |
+
dta, dti = dta_dti
|
170 |
+
|
171 |
+
assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}")
|
172 |
+
|
173 |
+
# TODO: tests with td64
|
174 |
+
def test_compare_mismatched_resolutions(self, comparison_op):
|
175 |
+
# comparison that numpy gets wrong because of silent overflows
|
176 |
+
op = comparison_op
|
177 |
+
|
178 |
+
iinfo = np.iinfo(np.int64)
|
179 |
+
vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)
|
180 |
+
|
181 |
+
# Construct so that arr2[1] < arr[1] < arr[2] < arr2[2]
|
182 |
+
arr = np.array(vals).view("M8[ns]")
|
183 |
+
arr2 = arr.view("M8[s]")
|
184 |
+
|
185 |
+
left = DatetimeArray._simple_new(arr, dtype=arr.dtype)
|
186 |
+
right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype)
|
187 |
+
|
188 |
+
if comparison_op is operator.eq:
|
189 |
+
expected = np.array([False, False, False])
|
190 |
+
elif comparison_op is operator.ne:
|
191 |
+
expected = np.array([True, True, True])
|
192 |
+
elif comparison_op in [operator.lt, operator.le]:
|
193 |
+
expected = np.array([False, False, True])
|
194 |
+
else:
|
195 |
+
expected = np.array([False, True, False])
|
196 |
+
|
197 |
+
result = op(left, right)
|
198 |
+
tm.assert_numpy_array_equal(result, expected)
|
199 |
+
|
200 |
+
result = op(left[1], right)
|
201 |
+
        tm.assert_numpy_array_equal(result, expected)

        if op not in [operator.eq, operator.ne]:
            # check that numpy still gets this wrong; if it is fixed we may be
            # able to remove compare_mismatched_resolutions
            np_res = op(left._ndarray, right._ndarray)
            tm.assert_numpy_array_equal(np_res[1:], ~expected[1:])

    def test_add_mismatched_reso_doesnt_downcast(self):
        # https://github.com/pandas-dev/pandas/pull/48748#issuecomment-1260181008
        td = pd.Timedelta(microseconds=1)
        dti = pd.date_range("2016-01-01", periods=3) - td
        dta = dti._data.as_unit("us")

        res = dta + td.as_unit("us")
        # even though the result is an even number of days
        # (so we _could_ downcast to unit="s"), we do not.
        assert res.unit == "us"

    @pytest.mark.parametrize(
        "scalar",
        [
            timedelta(hours=2),
            pd.Timedelta(hours=2),
            np.timedelta64(2, "h"),
            np.timedelta64(2 * 3600 * 1000, "ms"),
            pd.offsets.Minute(120),
            pd.offsets.Hour(2),
        ],
    )
    def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):
        dta, dti = dta_dti

        td = pd.Timedelta(scalar)
        exp_unit = tm.get_finest_unit(dta.unit, td.unit)

        expected = (dti + td)._data.as_unit(exp_unit)
        result = dta + scalar
        tm.assert_extension_array_equal(result, expected)

        result = scalar + dta
        tm.assert_extension_array_equal(result, expected)

        expected = (dti - td)._data.as_unit(exp_unit)
        result = dta - scalar
        tm.assert_extension_array_equal(result, expected)

    def test_sub_datetimelike_scalar_mismatch(self):
        dti = pd.date_range("2016-01-01", periods=3)
        dta = dti._data.as_unit("us")

        ts = dta[0].as_unit("s")

        result = dta - ts
        expected = (dti - dti[0])._data.as_unit("us")
        assert result.dtype == "m8[us]"
        tm.assert_extension_array_equal(result, expected)

    def test_sub_datetime64_reso_mismatch(self):
        dti = pd.date_range("2016-01-01", periods=3)
        left = dti._data.as_unit("s")
        right = left.as_unit("ms")

        result = left - right
        exp_values = np.array([0, 0, 0], dtype="m8[ms]")
        expected = TimedeltaArray._simple_new(
            exp_values,
            dtype=exp_values.dtype,
        )
        tm.assert_extension_array_equal(result, expected)
        result2 = right - left
        tm.assert_extension_array_equal(result2, expected)


class TestDatetimeArrayComparisons:
    # TODO: merge this into tests/arithmetic/test_datetime64 once it is
    # sufficiently robust

    def test_cmp_dt64_arraylike_tznaive(self, comparison_op):
        # arbitrary tz-naive DatetimeIndex
        op = comparison_op

        dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
        arr = dti._data
        assert arr.freq == dti.freq
        assert arr.tz == dti.tz

        right = dti

        expected = np.ones(len(arr), dtype=bool)
        if comparison_op.__name__ in ["ne", "gt", "lt"]:
            # for these the comparisons should be all-False
            expected = ~expected

        result = op(arr, arr)
        tm.assert_numpy_array_equal(result, expected)
        for other in [
            right,
            np.array(right),
            list(right),
            tuple(right),
            right.astype(object),
        ]:
            result = op(arr, other)
            tm.assert_numpy_array_equal(result, expected)

            result = op(other, arr)
            tm.assert_numpy_array_equal(result, expected)


class TestDatetimeArray:
    def test_astype_ns_to_ms_near_bounds(self):
        # GH#55979
        ts = pd.Timestamp("1677-09-21 00:12:43.145225")
        target = ts.as_unit("ms")

        dta = DatetimeArray._from_sequence([ts], dtype="M8[ns]")
        assert (dta.view("i8") == ts.as_unit("ns").value).all()

        result = dta.astype("M8[ms]")
        assert result[0] == target

        expected = DatetimeArray._from_sequence([ts], dtype="M8[ms]")
        assert (expected.view("i8") == target._value).all()

        tm.assert_datetime_array_equal(result, expected)

    def test_astype_non_nano_tznaive(self):
        dti = pd.date_range("2016-01-01", periods=3)

        res = dti.astype("M8[s]")
        assert res.dtype == "M8[s]"

        dta = dti._data
        res = dta.astype("M8[s]")
        assert res.dtype == "M8[s]"
        assert isinstance(res, pd.core.arrays.DatetimeArray)  # used to be ndarray

    def test_astype_non_nano_tzaware(self):
        dti = pd.date_range("2016-01-01", periods=3, tz="UTC")

        res = dti.astype("M8[s, US/Pacific]")
        assert res.dtype == "M8[s, US/Pacific]"

        dta = dti._data
        res = dta.astype("M8[s, US/Pacific]")
        assert res.dtype == "M8[s, US/Pacific]"

        # from non-nano to non-nano, preserving reso
        res2 = res.astype("M8[s, UTC]")
        assert res2.dtype == "M8[s, UTC]"
        assert not tm.shares_memory(res2, res)

        res3 = res.astype("M8[s, UTC]", copy=False)
        assert res2.dtype == "M8[s, UTC]"
        assert tm.shares_memory(res3, res)

    def test_astype_to_same(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central")
        )
        result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
        assert result is arr

    @pytest.mark.parametrize("dtype", ["datetime64[ns]", "datetime64[ns, UTC]"])
    @pytest.mark.parametrize(
        "other", ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, CET]"]
    )
    def test_astype_copies(self, dtype, other):
        # https://github.com/pandas-dev/pandas/pull/32490
        ser = pd.Series([1, 2], dtype=dtype)
        orig = ser.copy()

        err = False
        if (dtype == "datetime64[ns]") ^ (other == "datetime64[ns]"):
            # deprecated in favor of tz_localize
            err = True

        if err:
            if dtype == "datetime64[ns]":
                msg = "Use obj.tz_localize instead or series.dt.tz_localize instead"
            else:
                msg = "from timezone-aware dtype to timezone-naive dtype"
            with pytest.raises(TypeError, match=msg):
                ser.astype(other)
        else:
            t = ser.astype(other)
            t[:] = pd.NaT
            tm.assert_series_equal(ser, orig)

    @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
    def test_astype_int(self, dtype):
        arr = DatetimeArray._from_sequence(
            [pd.Timestamp("2000"), pd.Timestamp("2001")], dtype="M8[ns]"
        )

        if np.dtype(dtype) != np.int64:
            with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
                arr.astype(dtype)
            return

        result = arr.astype(dtype)
        expected = arr._ndarray.view("i8")
        tm.assert_numpy_array_equal(result, expected)

    def test_astype_to_sparse_dt64(self):
        # GH#50082
        dti = pd.date_range("2016-01-01", periods=4)
        dta = dti._data
        result = dta.astype("Sparse[datetime64[ns]]")

        assert result.dtype == "Sparse[datetime64[ns]]"
        assert (result == dta).all()

    def test_tz_setter_raises(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central")
        )
        with pytest.raises(AttributeError, match="tz_localize"):
            arr.tz = "UTC"

    def test_setitem_str_impute_tz(self, tz_naive_fixture):
        # Like for getitem, if we are passed a naive-like string, we impute
        # our own timezone.
        tz = tz_naive_fixture

        data = np.array([1, 2, 3], dtype="M8[ns]")
        dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz)
        arr = DatetimeArray._from_sequence(data, dtype=dtype)
        expected = arr.copy()

        ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz)
        setter = str(ts.tz_localize(None))

        # Setting a scalar tznaive string
        expected[0] = ts
        arr[0] = setter
        tm.assert_equal(arr, expected)

        # Setting a listlike of tznaive strings
        expected[1] = ts
        arr[:2] = [setter, setter]
        tm.assert_equal(arr, expected)

    def test_setitem_different_tz_raises(self):
        # pre-2.0 we required exact tz match, in 2.0 we require only
        # tzawareness-match
        data = np.array([1, 2, 3], dtype="M8[ns]")
        arr = DatetimeArray._from_sequence(
            data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")
        )
        with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"):
            arr[0] = pd.Timestamp("2000")

        ts = pd.Timestamp("2000", tz="US/Eastern")
        arr[0] = ts
        assert arr[0] == ts.tz_convert("US/Central")

    def test_setitem_clears_freq(self):
        a = pd.date_range("2000", periods=2, freq="D", tz="US/Central")._data
        a[0] = pd.Timestamp("2000", tz="US/Central")
        assert a.freq is None

    @pytest.mark.parametrize(
        "obj",
        [
            pd.Timestamp("2021-01-01"),
            pd.Timestamp("2021-01-01").to_datetime64(),
            pd.Timestamp("2021-01-01").to_pydatetime(),
        ],
    )
    def test_setitem_objects(self, obj):
        # make sure we accept datetime64 and datetime in addition to Timestamp
        dti = pd.date_range("2000", periods=2, freq="D")
        arr = dti._data

        arr[0] = obj
        assert arr[0] == obj

    def test_repeat_preserves_tz(self):
        dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
        arr = dti._data

        repeated = arr.repeat([1, 1])

        # preserves tz and values, but not freq
        expected = DatetimeArray._from_sequence(arr.asi8, dtype=arr.dtype)
        tm.assert_equal(repeated, expected)

    def test_value_counts_preserves_tz(self):
        dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
        arr = dti._data.repeat([4, 3])

        result = arr.value_counts()

        # Note: not tm.assert_index_equal, since `freq`s do not match
        assert result.index.equals(dti)

        arr[-2] = pd.NaT
        result = arr.value_counts(dropna=False)
        expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT], name="count")
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("method", ["pad", "backfill"])
    def test_fillna_preserves_tz(self, method):
        dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
        arr = DatetimeArray._from_sequence(dti, copy=True)
        arr[2] = pd.NaT

        fill_val = dti[1] if method == "pad" else dti[3]
        expected = DatetimeArray._from_sequence(
            [dti[0], dti[1], fill_val, dti[3], dti[4]],
            dtype=DatetimeTZDtype(tz="US/Central"),
        )

        result = arr._pad_or_backfill(method=method)
        tm.assert_extension_array_equal(result, expected)

        # assert that arr and dti were not modified in-place
        assert arr[2] is pd.NaT
        assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")

    def test_fillna_2d(self):
        dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
        dta = dti._data.reshape(3, 2).copy()
        dta[0, 1] = pd.NaT
        dta[1, 0] = pd.NaT

        res1 = dta._pad_or_backfill(method="pad")
        expected1 = dta.copy()
        expected1[1, 0] = dta[0, 0]
        tm.assert_extension_array_equal(res1, expected1)

        res2 = dta._pad_or_backfill(method="backfill")
        expected2 = dta.copy()
        expected2[1, 0] = dta[2, 0]
        expected2[0, 1] = dta[1, 1]
        tm.assert_extension_array_equal(res2, expected2)

        # with different ordering for underlying ndarray; behavior should
        # be unchanged
        dta2 = dta._from_backing_data(dta._ndarray.copy(order="F"))
        assert dta2._ndarray.flags["F_CONTIGUOUS"]
        assert not dta2._ndarray.flags["C_CONTIGUOUS"]
        tm.assert_extension_array_equal(dta, dta2)

        res3 = dta2._pad_or_backfill(method="pad")
        tm.assert_extension_array_equal(res3, expected1)

        res4 = dta2._pad_or_backfill(method="backfill")
        tm.assert_extension_array_equal(res4, expected2)

        # test the DataFrame method while we're here
        df = pd.DataFrame(dta)
        res = df.ffill()
        expected = pd.DataFrame(expected1)
        tm.assert_frame_equal(res, expected)

        res = df.bfill()
        expected = pd.DataFrame(expected2)
        tm.assert_frame_equal(res, expected)

    def test_array_interface_tz(self):
        tz = "US/Central"
        data = pd.date_range("2017", periods=2, tz=tz)._data
        result = np.asarray(data)

        expected = np.array(
            [
                pd.Timestamp("2017-01-01T00:00:00", tz=tz),
                pd.Timestamp("2017-01-02T00:00:00", tz=tz),
            ],
            dtype=object,
        )
        tm.assert_numpy_array_equal(result, expected)

        result = np.asarray(data, dtype=object)
        tm.assert_numpy_array_equal(result, expected)

        result = np.asarray(data, dtype="M8[ns]")

        expected = np.array(
            ["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]"
        )
        tm.assert_numpy_array_equal(result, expected)

    def test_array_interface(self):
        data = pd.date_range("2017", periods=2)._data
        expected = np.array(
            ["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]"
        )

        result = np.asarray(data)
        tm.assert_numpy_array_equal(result, expected)

        result = np.asarray(data, dtype=object)
        expected = np.array(
            [pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")],
            dtype=object,
        )
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_different_tz(self, index):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = pd.DatetimeIndex(data, freq="D")._data.tz_localize("Asia/Tokyo")
        if index:
            arr = pd.Index(arr)

        expected = arr.searchsorted(arr[2])
        result = arr.searchsorted(arr[2].tz_convert("UTC"))
        assert result == expected

        expected = arr.searchsorted(arr[2:6])
        result = arr.searchsorted(arr[2:6].tz_convert("UTC"))
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_tzawareness_compat(self, index):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = pd.DatetimeIndex(data, freq="D")._data
        if index:
            arr = pd.Index(arr)

        mismatch = arr.tz_localize("Asia/Tokyo")

        msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
        with pytest.raises(TypeError, match=msg):
            arr.searchsorted(mismatch[0])
        with pytest.raises(TypeError, match=msg):
            arr.searchsorted(mismatch)

        with pytest.raises(TypeError, match=msg):
            mismatch.searchsorted(arr[0])
        with pytest.raises(TypeError, match=msg):
            mismatch.searchsorted(arr)

    @pytest.mark.parametrize(
        "other",
        [
            1,
            np.int64(1),
            1.0,
            np.timedelta64("NaT"),
            pd.Timedelta(days=2),
            "invalid",
            np.arange(10, dtype="i8") * 24 * 3600 * 10**9,
            np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10**9,
            pd.Timestamp("2021-01-01").to_period("D"),
        ],
    )
    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_invalid_types(self, other, index):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = pd.DatetimeIndex(data, freq="D")._data
        if index:
            arr = pd.Index(arr)

        msg = "|".join(
            [
                "searchsorted requires compatible dtype or scalar",
                "value should be a 'Timestamp', 'NaT', or array of those. Got",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            arr.searchsorted(other)

    def test_shift_fill_value(self):
        dti = pd.date_range("2016-01-01", periods=3)

        dta = dti._data
        expected = DatetimeArray._from_sequence(np.roll(dta._ndarray, 1))

        fv = dta[-1]
        for fill_value in [fv, fv.to_pydatetime(), fv.to_datetime64()]:
            result = dta.shift(1, fill_value=fill_value)
            tm.assert_datetime_array_equal(result, expected)

        dta = dta.tz_localize("UTC")
        expected = expected.tz_localize("UTC")
        fv = dta[-1]
        for fill_value in [fv, fv.to_pydatetime()]:
            result = dta.shift(1, fill_value=fill_value)
            tm.assert_datetime_array_equal(result, expected)

    def test_shift_value_tzawareness_mismatch(self):
        dti = pd.date_range("2016-01-01", periods=3)

        dta = dti._data

        fv = dta[-1].tz_localize("UTC")
        for invalid in [fv, fv.to_pydatetime()]:
            with pytest.raises(TypeError, match="Cannot compare"):
                dta.shift(1, fill_value=invalid)

        dta = dta.tz_localize("UTC")
        fv = dta[-1].tz_localize(None)
        for invalid in [fv, fv.to_pydatetime(), fv.to_datetime64()]:
            with pytest.raises(TypeError, match="Cannot compare"):
                dta.shift(1, fill_value=invalid)

    def test_shift_requires_tzmatch(self):
        # pre-2.0 we required exact tz match, in 2.0 we require just
        # matching tzawareness
        dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
        dta = dti._data

        fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific")

        result = dta.shift(1, fill_value=fill_value)
        expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC"))
        tm.assert_equal(result, expected)

    def test_tz_localize_t2d(self):
        dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific")
        dta = dti._data.reshape(3, 4)
        result = dta.tz_localize(None)

        expected = dta.ravel().tz_localize(None).reshape(dta.shape)
        tm.assert_datetime_array_equal(result, expected)

        roundtrip = expected.tz_localize("US/Pacific")
        tm.assert_datetime_array_equal(roundtrip, dta)

    easts = ["US/Eastern", "dateutil/US/Eastern"]
    if ZoneInfo is not None:
        try:
            tz = ZoneInfo("US/Eastern")
        except KeyError:
            # no tzdata
            pass
        else:
            # Argument 1 to "append" of "list" has incompatible type "ZoneInfo";
            # expected "str"
            easts.append(tz)  # type: ignore[arg-type]

    @pytest.mark.parametrize("tz", easts)
    def test_iter_zoneinfo_fold(self, tz):
        # GH#49684
        utc_vals = np.array(
            [1320552000, 1320555600, 1320559200, 1320562800], dtype=np.int64
        )
        utc_vals *= 1_000_000_000

        dta = DatetimeArray._from_sequence(utc_vals).tz_localize("UTC").tz_convert(tz)

        left = dta[2]
        right = list(dta)[2]
        assert str(left) == str(right)
        # previously there was a bug where with non-pytz right would be
        #  Timestamp('2011-11-06 01:00:00-0400', tz='US/Eastern')
        # while left would be
        #  Timestamp('2011-11-06 01:00:00-0500', tz='US/Eastern')
        # The .value's would match (so they would compare as equal),
        #  but the folds would not
        assert left.utcoffset() == right.utcoffset()

        # The same bug in ints_to_pydatetime affected .astype, so we test
        #  that here.
        right2 = dta.astype(object)[2]
        assert str(left) == str(right2)
        assert left.utcoffset() == right2.utcoffset()

    @pytest.mark.parametrize(
        "freq, freq_depr",
        [
            ("2ME", "2M"),
            ("2SME", "2SM"),
            ("2SME", "2sm"),
            ("2QE", "2Q"),
            ("2QE-SEP", "2Q-SEP"),
            ("1YE", "1Y"),
            ("2YE-MAR", "2Y-MAR"),
            ("1YE", "1A"),
            ("2YE-MAR", "2A-MAR"),
            ("2ME", "2m"),
            ("2QE-SEP", "2q-sep"),
            ("2YE-MAR", "2a-mar"),
            ("2YE", "2y"),
        ],
    )
    def test_date_range_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr):
        # GH#9586, GH#54275
        depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed "
        f"in a future version, please use '{freq[1:]}' instead."

        expected = pd.date_range("1/1/2000", periods=4, freq=freq)
        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
            result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("freq_depr", ["2H", "2CBH", "2MIN", "2S", "2mS", "2Us"])
    def test_date_range_uppercase_frequency_deprecated(self, freq_depr):
        # GH#9586, GH#54939
        depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "
        f"future version. Please use '{freq_depr.lower()[1:]}' instead."

        expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.lower())
        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
            result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "freq_depr",
        [
            "2ye-mar",
            "2ys",
            "2qe",
            "2qs-feb",
            "2bqs",
            "2sms",
            "2bms",
            "2cbme",
            "2me",
            "2w",
        ],
    )
    def test_date_range_lowercase_frequency_deprecated(self, freq_depr):
        # GH#9586, GH#54939
        depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "
        f"future version, please use '{freq_depr.upper()[1:]}' instead."

        expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.upper())
        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
            result = pd.date_range("1/1/2000", periods=4, freq=freq_depr)
        tm.assert_index_equal(result, expected)


def test_factorize_sort_without_freq():
    dta = DatetimeArray._from_sequence([0, 2, 1], dtype="M8[ns]")

    msg = r"call pd.factorize\(obj, sort=True\) instead"
    with pytest.raises(NotImplementedError, match=msg):
        dta.factorize(sort=True)

    # Do TimedeltaArray while we're here
    tda = dta - dta[0]
    with pytest.raises(NotImplementedError, match=msg):
        tda.factorize(sort=True)
env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_timedeltas.py
ADDED
@@ -0,0 +1,313 @@
from datetime import timedelta

import numpy as np
import pytest

import pandas as pd
from pandas import Timedelta
import pandas._testing as tm
from pandas.core.arrays import (
    DatetimeArray,
    TimedeltaArray,
)


class TestNonNano:
    @pytest.fixture(params=["s", "ms", "us"])
    def unit(self, request):
        return request.param

    @pytest.fixture
    def tda(self, unit):
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        return TimedeltaArray._simple_new(arr, dtype=arr.dtype)

    def test_non_nano(self, unit):
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)

        assert tda.dtype == arr.dtype
        assert tda[0].unit == unit

    def test_as_unit_raises(self, tda):
        # GH#50616
        with pytest.raises(ValueError, match="Supported units"):
            tda.as_unit("D")

        tdi = pd.Index(tda)
        with pytest.raises(ValueError, match="Supported units"):
            tdi.as_unit("D")

    @pytest.mark.parametrize("field", TimedeltaArray._field_ops)
    def test_fields(self, tda, field):
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = getattr(tda, field)
        expected = getattr(tda_nano, field)
        tm.assert_numpy_array_equal(result, expected)

    def test_to_pytimedelta(self, tda):
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = tda.to_pytimedelta()
        expected = tda_nano.to_pytimedelta()
        tm.assert_numpy_array_equal(result, expected)

    def test_total_seconds(self, unit, tda):
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = tda.total_seconds()
        expected = tda_nano.total_seconds()
        tm.assert_numpy_array_equal(result, expected)

    def test_timedelta_array_total_seconds(self):
        # GH34290
        expected = Timedelta("2 min").total_seconds()

        result = pd.array([Timedelta("2 min")]).total_seconds()[0]
        assert result == expected

    def test_total_seconds_nanoseconds(self):
        # issue #48521
        start_time = pd.Series(["2145-11-02 06:00:00"]).astype("datetime64[ns]")
        end_time = pd.Series(["2145-11-02 07:06:00"]).astype("datetime64[ns]")
        expected = (end_time - start_time).values / np.timedelta64(1, "s")
        result = (end_time - start_time).dt.total_seconds().values
        assert result == expected

    @pytest.mark.parametrize(
        "nat", [np.datetime64("NaT", "ns"), np.datetime64("NaT", "us")]
    )
    def test_add_nat_datetimelike_scalar(self, nat, tda):
        result = tda + nat
        assert isinstance(result, DatetimeArray)
        assert result._creso == tda._creso
        assert result.isna().all()

        result = nat + tda
        assert isinstance(result, DatetimeArray)
        assert result._creso == tda._creso
        assert result.isna().all()

    def test_add_pdnat(self, tda):
        result = tda + pd.NaT
        assert isinstance(result, TimedeltaArray)
        assert result._creso == tda._creso
        assert result.isna().all()

        result = pd.NaT + tda
        assert isinstance(result, TimedeltaArray)
        assert result._creso == tda._creso
        assert result.isna().all()

    # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert
    #  or tz_localize with non-nano; implement tests specific to that.
    def test_add_datetimelike_scalar(self, tda, tz_naive_fixture):
        ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture).as_unit("ns")

        expected = tda.as_unit("ns") + ts
        res = tda + ts
        tm.assert_extension_array_equal(res, expected)
        res = ts + tda
        tm.assert_extension_array_equal(res, expected)

        ts += Timedelta(1)  # case where we can't cast losslessly

        exp_values = tda._ndarray + ts.asm8
        expected = (
            DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype)
            .tz_localize("UTC")
            .tz_convert(ts.tz)
        )

        result = tda + ts
        tm.assert_extension_array_equal(result, expected)

        result = ts + tda
        tm.assert_extension_array_equal(result, expected)

    def test_mul_scalar(self, tda):
        other = 2
        result = tda * other
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_mul_listlike(self, tda):
        other = np.arange(len(tda))
        result = tda * other
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_mul_listlike_object(self, tda):
        other = np.arange(len(tda))
        result = tda * other.astype(object)
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_numeric_scalar(self, tda):
        other = 2
        result = tda / other
        expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_td_scalar(self, tda):
        other = timedelta(seconds=1)
        result = tda / other
        expected = tda._ndarray / np.timedelta64(1, "s")
        tm.assert_numpy_array_equal(result, expected)

    def test_div_numeric_array(self, tda):
        other = np.arange(len(tda))
        result = tda / other
        expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_td_array(self, tda):
        other = tda._ndarray + tda._ndarray[-1]
        result = tda / other
        expected = tda._ndarray / other
        tm.assert_numpy_array_equal(result, expected)

    def test_add_timedeltaarraylike(self, tda):
        tda_nano = tda.astype("m8[ns]")

        expected = tda_nano * 2
        res = tda_nano + tda
        tm.assert_extension_array_equal(res, expected)
        res = tda + tda_nano
        tm.assert_extension_array_equal(res, expected)

        expected = tda_nano * 0
        res = tda - tda_nano
        tm.assert_extension_array_equal(res, expected)

        res = tda_nano - tda
        tm.assert_extension_array_equal(res, expected)


class TestTimedeltaArray:
    @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
    def test_astype_int(self, dtype):
        arr = TimedeltaArray._from_sequence(
            [Timedelta("1h"), Timedelta("2h")], dtype="m8[ns]"
        )

        if np.dtype(dtype) != np.int64:
            with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
                arr.astype(dtype)
            return

        result = arr.astype(dtype)
        expected = arr._ndarray.view("i8")
        tm.assert_numpy_array_equal(result, expected)

    def test_setitem_clears_freq(self):
        a = pd.timedelta_range("1h", periods=2, freq="h")._data
        a[0] = Timedelta("1h")
        assert a.freq is None

    @pytest.mark.parametrize(
        "obj",
        [
            Timedelta(seconds=1),
            Timedelta(seconds=1).to_timedelta64(),
            Timedelta(seconds=1).to_pytimedelta(),
        ],
    )
    def test_setitem_objects(self, obj):
        # make sure we accept timedelta64 and timedelta in addition to Timedelta
        tdi = pd.timedelta_range("2 Days", periods=4, freq="h")
        arr = tdi._data

        arr[0] = obj
        assert arr[0] == Timedelta(seconds=1)

    @pytest.mark.parametrize(
        "other",
        [
            1,
            np.int64(1),
            1.0,
            np.datetime64("NaT"),
            pd.Timestamp("2021-01-01"),
            "invalid",
            np.arange(10, dtype="i8") * 24 * 3600 * 10**9,
            (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"),
            pd.Timestamp("2021-01-01").to_period("D"),
        ],
    )
    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_invalid_types(self, other, index):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = pd.TimedeltaIndex(data, freq="D")._data
        if index:
            arr = pd.Index(arr)

        msg = "|".join(
            [
                "searchsorted requires compatible dtype or scalar",
                "value should be a 'Timedelta', 'NaT', or array of those. Got",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            arr.searchsorted(other)


class TestUnaryOps:
    def test_abs(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray._from_sequence(vals)

        evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        expected = TimedeltaArray._from_sequence(evals)

        result = abs(arr)
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.abs(arr)
        tm.assert_timedelta_array_equal(result2, expected)

    def test_pos(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray._from_sequence(vals)

        result = +arr
        tm.assert_timedelta_array_equal(result, arr)
        assert not tm.shares_memory(result, arr)

        result2 = np.positive(arr)
        tm.assert_timedelta_array_equal(result2, arr)
        assert not tm.shares_memory(result2, arr)

    def test_neg(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray._from_sequence(vals)

        evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]")
        expected = TimedeltaArray._from_sequence(evals)

        result = -arr
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.negative(arr)
        tm.assert_timedelta_array_equal(result2, expected)

    def test_neg_freq(self):
        tdi = pd.timedelta_range("2 Days", periods=4, freq="h")
        arr = tdi._data

        expected = -tdi._data

        result = -arr
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.negative(arr)
        tm.assert_timedelta_array_equal(result2, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (184 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc
ADDED
Binary file (12.9 kB).