diff --git a/.gitattributes b/.gitattributes index 598c42a10b2cf6e6b50887e748bfa5e68e57e28c..f8780c8e2edc8a670f5d8c6fc46c2ceacc84973e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -169,3 +169,4 @@ env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64 env-llmeval/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..da9bf507069d6e1f4ecf8f7d12018d89d775c3de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97df0241d7470fafc2785aaef2c2c4216803f45190f2c617a9042fecfc04678b +size 1409928 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..88a9a259ac8ec87255b25b3e15375b1c92099999 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/__init__.py @@ -0,0 +1,87 @@ +__all__ = [ + "dtypes", + "localize_pydatetime", + "NaT", + "NaTType", + "iNaT", + "nat_strings", + "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", + "IncompatibleFrequency", + "Period", + "Resolution", + "Timedelta", + "normalize_i8_timestamps", + "is_date_array_normalized", + "dt64arr_to_periodarr", + "delta_to_nanoseconds", + "ints_to_pydatetime", + "ints_to_pytimedelta", + "get_resolution", + "Timestamp", + "tz_convert_from_utc_single", + "tz_convert_from_utc", + "to_offset", + "Tick", + "BaseOffset", + "tz_compare", + "is_unitless", + "astype_overflowsafe", + "get_unit_from_dtype", + "periods_per_day", + "periods_per_second", + "guess_datetime_format", + "add_overflowsafe", + "get_supported_dtype", + "is_supported_dtype", +] + +from pandas._libs.tslibs import dtypes # pylint: disable=import-self +from pandas._libs.tslibs.conversion import localize_pydatetime +from pandas._libs.tslibs.dtypes import ( + Resolution, + periods_per_day, + periods_per_second, +) +from pandas._libs.tslibs.nattype import ( + NaT, + NaTType, + iNaT, + nat_strings, +) +from pandas._libs.tslibs.np_datetime import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + add_overflowsafe, + astype_overflowsafe, + get_supported_dtype, + is_supported_dtype, + is_unitless, + py_get_unit_from_dtype as get_unit_from_dtype, +) +from pandas._libs.tslibs.offsets import ( + BaseOffset, + Tick, + to_offset, +) +from pandas._libs.tslibs.parsing import guess_datetime_format +from pandas._libs.tslibs.period import ( + IncompatibleFrequency, + Period, +) +from pandas._libs.tslibs.timedeltas import ( + Timedelta, + delta_to_nanoseconds, + ints_to_pytimedelta, +) +from pandas._libs.tslibs.timestamps import Timestamp +from pandas._libs.tslibs.timezones import tz_compare 
+from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single +from pandas._libs.tslibs.vectorized import ( + dt64arr_to_periodarr, + get_resolution, + ints_to_pydatetime, + is_date_array_normalized, + normalize_i8_timestamps, + tz_convert_from_utc, +) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8e0c65f3712af2f809109c69b96c9e9774431c47 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.pyi new file mode 100644 index 0000000000000000000000000000000000000000..993f18a61d74aaa643e9790df70ede618b917223 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/ccalendar.pyi @@ -0,0 +1,12 @@ +DAYS: list[str] +MONTH_ALIASES: dict[int, str] +MONTH_NUMBERS: dict[str, int] +MONTHS: list[str] +int_to_weekday: dict[int, str] + +def get_firstbday(year: int, month: int) -> int: ... +def get_lastbday(year: int, month: int) -> int: ... +def get_day_of_year(year: int, month: int, day: int) -> int: ... +def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ... +def get_week_of_year(year: int, month: int, day: int) -> int: ... +def get_days_in_month(year: int, month: int) -> int: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.pyi new file mode 100644 index 0000000000000000000000000000000000000000..26affae577f4d3f4ecda2ac9c1bc0cb748a35d4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.pyi @@ -0,0 +1,14 @@ +from datetime import ( + datetime, + tzinfo, +) + +import numpy as np + +DT64NS_DTYPE: np.dtype +TD64NS_DTYPE: np.dtype + +def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ... +def cast_from_unit_vectorized( + values: np.ndarray, unit: str, out_unit: str = ... +) -> np.ndarray: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7fdeb88d498ac88b6d023e25409e8f756b12c07a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/dtypes.pyi @@ -0,0 +1,83 @@ +from enum import Enum + +OFFSET_TO_PERIOD_FREQSTR: dict[str, str] + +def periods_per_day(reso: int = ...) -> int: ... +def periods_per_second(reso: int) -> int: ... +def abbrev_to_npy_unit(abbrev: str | None) -> int: ... +def freq_to_period_freqstr(freq_n: int, freq_name: str) -> str: ... + +class PeriodDtypeBase: + _dtype_code: int # PeriodDtypeCode + _n: int + + # actually __cinit__ + def __new__(cls, code: int, n: int): ... + @property + def _freq_group_code(self) -> int: ... + @property + def _resolution_obj(self) -> Resolution: ... + def _get_to_timestamp_base(self) -> int: ... + @property + def _freqstr(self) -> str: ... + def __hash__(self) -> int: ... + def _is_tick_like(self) -> bool: ... + @property + def _creso(self) -> int: ... + @property + def _td64_unit(self) -> str: ... 
+ +class FreqGroup(Enum): + FR_ANN: int + FR_QTR: int + FR_MTH: int + FR_WK: int + FR_BUS: int + FR_DAY: int + FR_HR: int + FR_MIN: int + FR_SEC: int + FR_MS: int + FR_US: int + FR_NS: int + FR_UND: int + @staticmethod + def from_period_dtype_code(code: int) -> FreqGroup: ... + +class Resolution(Enum): + RESO_NS: int + RESO_US: int + RESO_MS: int + RESO_SEC: int + RESO_MIN: int + RESO_HR: int + RESO_DAY: int + RESO_MTH: int + RESO_QTR: int + RESO_YR: int + def __lt__(self, other: Resolution) -> bool: ... + def __ge__(self, other: Resolution) -> bool: ... + @property + def attrname(self) -> str: ... + @classmethod + def from_attrname(cls, attrname: str) -> Resolution: ... + @classmethod + def get_reso_from_freqstr(cls, freq: str) -> Resolution: ... + @property + def attr_abbrev(self) -> str: ... + +class NpyDatetimeUnit(Enum): + NPY_FR_Y: int + NPY_FR_M: int + NPY_FR_W: int + NPY_FR_D: int + NPY_FR_h: int + NPY_FR_m: int + NPY_FR_s: int + NPY_FR_ms: int + NPY_FR_us: int + NPY_FR_ns: int + NPY_FR_ps: int + NPY_FR_fs: int + NPY_FR_as: int + NPY_FR_GENERIC: int diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c6cfd44e9f6ab76ee4bc3be7c765569991514d18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/fields.pyi @@ -0,0 +1,62 @@ +import numpy as np + +from pandas._typing import npt + +def build_field_sarray( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int, # NPY_DATETIMEUNIT +) -> np.ndarray: ... +def month_position_check(fields, weekdays) -> str | None: ... +def get_date_name_field( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + locale: str | None = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def get_start_end_field( + dtindex: npt.NDArray[np.int64], + field: str, + freqstr: str | None = ..., + month_kw: int = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.bool_]: ... +def get_date_field( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int32]: ... +def get_timedelta_field( + tdindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int32]: ... +def get_timedelta_days( + tdindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... +def isleapyear_arr( + years: np.ndarray, +) -> npt.NDArray[np.bool_]: ... +def build_isocalendar_sarray( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int, # NPY_DATETIMEUNIT +) -> np.ndarray: ... +def _get_locale_names(name_type: str, locale: str | None = ...): ... + +class RoundTo: + @property + def MINUS_INFTY(self) -> int: ... + @property + def PLUS_INFTY(self) -> int: ... + @property + def NEAREST_HALF_EVEN(self) -> int: ... + @property + def NEAREST_HALF_PLUS_INFTY(self) -> int: ... + @property + def NEAREST_HALF_MINUS_INFTY(self) -> int: ... + +def round_nsint64( + values: npt.NDArray[np.int64], + mode: RoundTo, + nanos: int, +) -> npt.NDArray[np.int64]: ... 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..47f093e59aa3c802c7bc11bd49b7f5c09994c4ec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bf51e21f48b5fe1789c80537bb71de6c78f7f2d9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..373ac64c273a9bc471fb02504390c16583696c07
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..40394f915d4b0f12d3631d997c46381757152bfe
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/parsing.pyi
@@ -0,0 +1,33 @@
+from datetime import datetime
+
+import numpy as np
+
+from pandas._typing import npt
+
+class DateParseError(ValueError): ...
+
+def py_parse_datetime_string(
+    date_string: str,
+    dayfirst: bool = ...,
+    yearfirst: bool = ...,
+) -> datetime: ...
+def parse_datetime_string_with_reso(
+    date_string: str,
+    freq: str | None = ...,
+    dayfirst: bool | None = ...,
+    yearfirst: bool | None = ...,
+) -> tuple[datetime, str]: ...
+def _does_string_look_like_datetime(py_string: str) -> bool: ...
+def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ...
+def try_parse_dates(
+    values: npt.NDArray[np.object_],  # object[:]
+    parser,
+) -> npt.NDArray[np.object_]: ...
+def guess_datetime_format(
+    dt_str: str,
+    dayfirst: bool | None = ...,
+) -> str | None: ...
+def concat_date_cols(
+    date_cols: tuple,
+) -> npt.NDArray[np.object_]: ...
+def get_rule_month(source: str) -> str: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..0ec1a1e25a2b3cfe974baebfe32d686435f73e11
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/strptime.pyi
@@ -0,0 +1,14 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def array_strptime(
+    values: npt.NDArray[np.object_],
+    fmt: str | None,
+    exact: bool = ...,
+    errors: str = ...,
+    utc: bool = ...,
+    creso: int = ...,  # NPY_DATETIMEUNIT
+) -> tuple[np.ndarray, np.ndarray]: ...
+ +# first ndarray is M8[ns], second is object ndarray of tzinfo | None diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..80f26f629231fca07c0740ced156d0d030d19a25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi new file mode 100644 index 0000000000000000000000000000000000000000..24ec6c8891a89a7af4042df066fec6bc9d7b0e04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timedeltas.pyi @@ -0,0 +1,174 @@ +from datetime import timedelta +from typing import ( + ClassVar, + Literal, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs import ( + NaTType, + Tick, +) +from pandas._typing import ( + Frequency, + Self, + npt, +) + +# This should be kept consistent with the keys in the dict timedelta_abbrevs +# in pandas/_libs/tslibs/timedeltas.pyx +UnitChoices: TypeAlias = Literal[ + "Y", + "y", + "M", + "W", + "w", + "D", + "d", + "days", + "day", + "hours", + "hour", + "hr", + "h", + "m", + "minute", + "min", + "minutes", + "T", + "t", + "s", + "seconds", + "sec", + "second", + "ms", + "milliseconds", + "millisecond", + "milli", + "millis", + "L", + "l", + "us", + "microseconds", + "microsecond", + "µs", + "micro", + "micros", + "u", + "ns", + "nanoseconds", + "nano", + "nanos", + "nanosecond", + "n", +] +_S = TypeVar("_S", bound=timedelta) + +def get_unit_for_round(freq, creso: int) -> int: ... +def disallow_ambiguous_unit(unit: str | None) -> None: ... +def ints_to_pytimedelta( + m8values: npt.NDArray[np.timedelta64], + box: bool = ..., +) -> npt.NDArray[np.object_]: ... +def array_to_timedelta64( + values: npt.NDArray[np.object_], + unit: str | None = ..., + errors: str = ..., +) -> np.ndarray: ... # np.ndarray[m8ns] +def parse_timedelta_unit(unit: str | None) -> UnitChoices: ... +def delta_to_nanoseconds( + delta: np.timedelta64 | timedelta | Tick, + reso: int = ..., # NPY_DATETIMEUNIT + round_ok: bool = ..., +) -> int: ... +def floordiv_object_array( + left: np.ndarray, right: npt.NDArray[np.object_] +) -> np.ndarray: ... +def truediv_object_array( + left: np.ndarray, right: npt.NDArray[np.object_] +) -> np.ndarray: ... + +class Timedelta(timedelta): + _creso: int + min: ClassVar[Timedelta] + max: ClassVar[Timedelta] + resolution: ClassVar[Timedelta] + value: int # np.int64 + _value: int # np.int64 + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] + cls: type[_S], + value=..., + unit: str | None = ..., + **kwargs: float | np.integer | np.floating, + ) -> _S | NaTType: ... + @classmethod + def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ... + @property + def days(self) -> int: ... + @property + def seconds(self) -> int: ... + @property + def microseconds(self) -> int: ... + def total_seconds(self) -> float: ... + def to_pytimedelta(self) -> timedelta: ... + def to_timedelta64(self) -> np.timedelta64: ... + @property + def asm8(self) -> np.timedelta64: ... + # TODO: round/floor/ceil could return NaT? 
+ def round(self, freq: Frequency) -> Self: ... + def floor(self, freq: Frequency) -> Self: ... + def ceil(self, freq: Frequency) -> Self: ... + @property + def resolution_string(self) -> str: ... + def __add__(self, other: timedelta) -> Timedelta: ... + def __radd__(self, other: timedelta) -> Timedelta: ... + def __sub__(self, other: timedelta) -> Timedelta: ... + def __rsub__(self, other: timedelta) -> Timedelta: ... + def __neg__(self) -> Timedelta: ... + def __pos__(self) -> Timedelta: ... + def __abs__(self) -> Timedelta: ... + def __mul__(self, other: float) -> Timedelta: ... + def __rmul__(self, other: float) -> Timedelta: ... + # error: Signature of "__floordiv__" incompatible with supertype "timedelta" + @overload # type: ignore[override] + def __floordiv__(self, other: timedelta) -> int: ... + @overload + def __floordiv__(self, other: float) -> Timedelta: ... + @overload + def __floordiv__( + self, other: npt.NDArray[np.timedelta64] + ) -> npt.NDArray[np.intp]: ... + @overload + def __floordiv__( + self, other: npt.NDArray[np.number] + ) -> npt.NDArray[np.timedelta64] | Timedelta: ... + @overload + def __rfloordiv__(self, other: timedelta | str) -> int: ... + @overload + def __rfloordiv__(self, other: None | NaTType) -> NaTType: ... + @overload + def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ... + @overload + def __truediv__(self, other: timedelta) -> float: ... + @overload + def __truediv__(self, other: float) -> Timedelta: ... + def __mod__(self, other: timedelta) -> Timedelta: ... + def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ... + def __le__(self, other: timedelta) -> bool: ... + def __lt__(self, other: timedelta) -> bool: ... + def __ge__(self, other: timedelta) -> bool: ... + def __gt__(self, other: timedelta) -> bool: ... + def __hash__(self) -> int: ... + def isoformat(self) -> str: ... + def to_numpy( + self, dtype: npt.DTypeLike = ..., copy: bool = False + ) -> np.timedelta64: ... + def view(self, dtype: npt.DTypeLike) -> object: ... + @property + def unit(self) -> str: ... + def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ... diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3ad93a6c86cb482a32ea43eba02b7b764e80a293 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c769b09d1b7a1adc4532dd367bcbc4cf522419cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.pyi @@ -0,0 +1,241 @@ +from datetime import ( + date as _date, + datetime, + time as _time, + timedelta, + tzinfo as _tzinfo, +) +from time import struct_time +from typing import ( + ClassVar, + Literal, + TypeAlias, + overload, +) + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + NaTType, + Period, + Tick, + Timedelta, +) +from pandas._typing import ( + Self, + TimestampNonexistent, +) + +_TimeZones: TypeAlias = str | _tzinfo | None | int + +def integer_op_not_supported(obj: object) -> TypeError: ... 
+ +class Timestamp(datetime): + _creso: int + min: ClassVar[Timestamp] + max: ClassVar[Timestamp] + + resolution: ClassVar[Timedelta] + _value: int # np.int64 + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] + cls: type[Self], + ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ..., + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + *, + nanosecond: int | None = ..., + tz: _TimeZones = ..., + unit: str | int | None = ..., + fold: int | None = ..., + ) -> Self | NaTType: ... + @classmethod + def _from_value_and_reso( + cls, value: int, reso: int, tz: _TimeZones + ) -> Timestamp: ... + @property + def value(self) -> int: ... # np.int64 + @property + def year(self) -> int: ... + @property + def month(self) -> int: ... + @property + def day(self) -> int: ... + @property + def hour(self) -> int: ... + @property + def minute(self) -> int: ... + @property + def second(self) -> int: ... + @property + def microsecond(self) -> int: ... + @property + def nanosecond(self) -> int: ... + @property + def tzinfo(self) -> _tzinfo | None: ... + @property + def tz(self) -> _tzinfo | None: ... + @property + def fold(self) -> int: ... + @classmethod + def fromtimestamp(cls, ts: float, tz: _TimeZones = ...) -> Self: ... + @classmethod + def utcfromtimestamp(cls, ts: float) -> Self: ... + @classmethod + def today(cls, tz: _TimeZones = ...) -> Self: ... + @classmethod + def fromordinal( + cls, + ordinal: int, + tz: _TimeZones = ..., + ) -> Self: ... + @classmethod + def now(cls, tz: _TimeZones = ...) -> Self: ... + @classmethod + def utcnow(cls) -> Self: ... + # error: Signature of "combine" incompatible with supertype "datetime" + @classmethod + def combine( # type: ignore[override] + cls, date: _date, time: _time + ) -> datetime: ... + @classmethod + def fromisoformat(cls, date_string: str) -> Self: ... + def strftime(self, format: str) -> str: ... + def __format__(self, fmt: str) -> str: ... + def toordinal(self) -> int: ... + def timetuple(self) -> struct_time: ... + def timestamp(self) -> float: ... + def utctimetuple(self) -> struct_time: ... + def date(self) -> _date: ... + def time(self) -> _time: ... + def timetz(self) -> _time: ... + # LSP violation: nanosecond is not present in datetime.datetime.replace + # and has positional args following it + def replace( # type: ignore[override] + self, + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | type[object] | None = ..., + fold: int | None = ..., + ) -> Self: ... + # LSP violation: datetime.datetime.astimezone has a default value for tz + def astimezone(self, tz: _TimeZones) -> Self: ... # type: ignore[override] + def ctime(self) -> str: ... + def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... + @classmethod + def strptime( + # Note: strptime is actually disabled and raises NotImplementedError + cls, + date_string: str, + format: str, + ) -> Self: ... + def utcoffset(self) -> timedelta | None: ... + def tzname(self) -> str | None: ... + def dst(self) -> timedelta | None: ... + def __le__(self, other: datetime) -> bool: ... 
# type: ignore[override] + def __lt__(self, other: datetime) -> bool: ... # type: ignore[override] + def __ge__(self, other: datetime) -> bool: ... # type: ignore[override] + def __gt__(self, other: datetime) -> bool: ... # type: ignore[override] + # error: Signature of "__add__" incompatible with supertype "date"/"datetime" + @overload # type: ignore[override] + def __add__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ... + def __radd__(self, other: timedelta) -> Self: ... + @overload # type: ignore[override] + def __sub__(self, other: datetime) -> Timedelta: ... + @overload + def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ... + def __hash__(self) -> int: ... + def weekday(self) -> int: ... + def isoweekday(self) -> int: ... + # Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return + # type "_IsoCalendarDate" in supertype "date" + def isocalendar(self) -> tuple[int, int, int]: ... # type: ignore[override] + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + def to_pydatetime(self, warn: bool = ...) -> datetime: ... + def to_datetime64(self) -> np.datetime64: ... + def to_period(self, freq: BaseOffset | str | None = None) -> Period: ... + def to_julian_date(self) -> np.float64: ... + @property + def asm8(self) -> np.datetime64: ... + def tz_convert(self, tz: _TimeZones) -> Self: ... + # TODO: could return NaT? + def tz_localize( + self, + tz: _TimeZones, + ambiguous: bool | Literal["raise", "NaT"] = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def normalize(self) -> Self: ... + # TODO: round/floor/ceil could return NaT? + def round( + self, + freq: str, + ambiguous: bool | Literal["raise", "NaT"] = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def floor( + self, + freq: str, + ambiguous: bool | Literal["raise", "NaT"] = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def ceil( + self, + freq: str, + ambiguous: bool | Literal["raise", "NaT"] = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def day_name(self, locale: str | None = ...) -> str: ... + def month_name(self, locale: str | None = ...) -> str: ... + @property + def day_of_week(self) -> int: ... + @property + def dayofweek(self) -> int: ... + @property + def day_of_year(self) -> int: ... + @property + def dayofyear(self) -> int: ... + @property + def quarter(self) -> int: ... + @property + def week(self) -> int: ... + def to_numpy( + self, dtype: np.dtype | None = ..., copy: bool = ... + ) -> np.datetime64: ... + @property + def _date_repr(self) -> str: ... + @property + def days_in_month(self) -> int: ... + @property + def daysinmonth(self) -> int: ... + @property + def unit(self) -> str: ... + def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ... 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..202f440c5dbe53bdcac01430beff3d9d7664df04
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..2108fa0f35547191c3db683f8cc1015e5bda4abb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/tzconversion.pyi
@@ -0,0 +1,21 @@
+from datetime import (
+    timedelta,
+    tzinfo,
+)
+from typing import Iterable
+
+import numpy as np
+
+from pandas._typing import npt
+
+# tz_convert_from_utc_single exposed for testing
+def tz_convert_from_utc_single(
+    utc_val: np.int64, tz: tzinfo, creso: int = ...
+) -> np.int64: ...
+def tz_localize_to_utc(
+    vals: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    ambiguous: str | bool | Iterable[bool] | None = ...,
+    nonexistent: str | timedelta | np.timedelta64 | None = ...,
+    creso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b88016fa186b01b5d503229e186ab630cf4ad61a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.pyi b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..de19f592da62bbeb0b3ab267039abd902d4bb854
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/vectorized.pyi
@@ -0,0 +1,43 @@
+"""
+For cython types that cannot be represented precisely, closest-available
+python equivalents are used, and the precise types kept as adjacent comments.
+"""
+from datetime import tzinfo
+
+import numpy as np
+
+from pandas._libs.tslibs.dtypes import Resolution
+from pandas._typing import npt
+
+def dt64arr_to_periodarr(
+    stamps: npt.NDArray[np.int64],
+    freq: int,
+    tz: tzinfo | None,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def is_date_array_normalized(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int,  # NPY_DATETIMEUNIT
+) -> bool: ...
+def normalize_i8_timestamps(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def get_resolution(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> Resolution: ...
+def ints_to_pydatetime(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None = ...,
+    box: str = ...,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def tz_convert_from_utc(
+    stamps: npt.NDArray[np.int64],
+    tz: tzinfo | None,
+    reso: int = ...,  # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff11968db15f0f7c6057a46c252a91daee7b9cd9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__init__.py @@ -0,0 +1,9 @@ +from pandas.io.parsers.readers import ( + TextFileReader, + TextParser, + read_csv, + read_fwf, + read_table, +) + +__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..051c15fa1d82d0efc992b4821ace6afd377cfeae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cad5895d0fffe8adf9c6d37ab2a4f3c6570e807a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e45827796d975fef83686fa8a1e3d3e9d74a1164 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed6f12fa9c17d8d4e1603ba18d7e23d2c8345dc2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..890b22154648e6b12d636c5df3595d105ff02ac9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py @@ -0,0 +1,303 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +import warnings + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + ParserError, + ParserWarning, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.inference import is_integer + +import pandas as pd +from pandas import DataFrame + +from pandas.io._util import ( + _arrow_dtype_mapping, + arrow_string_types_mapper, +) +from pandas.io.parsers.base_parser import ParserBase + +if TYPE_CHECKING: + from pandas._typing 
import ReadBuffer + + +class ArrowParserWrapper(ParserBase): + """ + Wrapper for the pyarrow engine for read_csv() + """ + + def __init__(self, src: ReadBuffer[bytes], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + self.src = src + + self._parse_kwds() + + def _parse_kwds(self) -> None: + """ + Validates keywords before passing to pyarrow. + """ + encoding: str | None = self.kwds.get("encoding") + self.encoding = "utf-8" if encoding is None else encoding + + na_values = self.kwds["na_values"] + if isinstance(na_values, dict): + raise ValueError( + "The pyarrow engine doesn't support passing a dict for na_values" + ) + self.na_values = list(self.kwds["na_values"]) + + def _get_pyarrow_options(self) -> None: + """ + Rename some arguments to pass to pyarrow + """ + mapping = { + "usecols": "include_columns", + "na_values": "null_values", + "escapechar": "escape_char", + "skip_blank_lines": "ignore_empty_lines", + "decimal": "decimal_point", + "quotechar": "quote_char", + } + for pandas_name, pyarrow_name in mapping.items(): + if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None: + self.kwds[pyarrow_name] = self.kwds.pop(pandas_name) + + # Date format handling + # If we get a string, we need to convert it into a list for pyarrow + # If we get a dict, we want to parse those separately + date_format = self.date_format + if isinstance(date_format, str): + date_format = [date_format] + else: + # In case of dict, we don't want to propagate through, so + # just set to pyarrow default of None + + # Ideally, in future we disable pyarrow dtype inference (read in as string) + # to prevent misreads. + date_format = None + self.kwds["timestamp_parsers"] = date_format + + self.parse_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines") + } + + on_bad_lines = self.kwds.get("on_bad_lines") + if on_bad_lines is not None: + if callable(on_bad_lines): + self.parse_options["invalid_row_handler"] = on_bad_lines + elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR: + self.parse_options[ + "invalid_row_handler" + ] = None # PyArrow raises an exception by default + elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN: + + def handle_warning(invalid_row) -> str: + warnings.warn( + f"Expected {invalid_row.expected_columns} columns, but found " + f"{invalid_row.actual_columns}: {invalid_row.text}", + ParserWarning, + stacklevel=find_stack_level(), + ) + return "skip" + + self.parse_options["invalid_row_handler"] = handle_warning + elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP: + self.parse_options["invalid_row_handler"] = lambda _: "skip" + + self.convert_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ( + "include_columns", + "null_values", + "true_values", + "false_values", + "decimal_point", + "timestamp_parsers", + ) + } + self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"] + # autogenerated column names are prefixed with 'f' in pyarrow.csv + if self.header is None and "include_columns" in self.convert_options: + self.convert_options["include_columns"] = [ + f"f{n}" for n in self.convert_options["include_columns"] + ] + + self.read_options = { + "autogenerate_column_names": self.header is None, + "skip_rows": self.header + if self.header is not None + else self.kwds["skiprows"], + 
"encoding": self.encoding, + } + + def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame: + """ + Processes data read in based on kwargs. + + Parameters + ---------- + frame: DataFrame + The DataFrame to process. + + Returns + ------- + DataFrame + The processed DataFrame. + """ + num_cols = len(frame.columns) + multi_index_named = True + if self.header is None: + if self.names is None: + if self.header is None: + self.names = range(num_cols) + if len(self.names) != num_cols: + # usecols is passed through to pyarrow, we only handle index col here + # The only way self.names is not the same length as number of cols is + # if we have int index_col. We should just pad the names(they will get + # removed anyways) to expected length then. + self.names = list(range(num_cols - len(self.names))) + self.names + multi_index_named = False + frame.columns = self.names + # we only need the frame not the names + _, frame = self._do_date_conversions(frame.columns, frame) + if self.index_col is not None: + index_to_set = self.index_col.copy() + for i, item in enumerate(self.index_col): + if is_integer(item): + index_to_set[i] = frame.columns[item] + # String case + elif item not in frame.columns: + raise ValueError(f"Index {item} invalid") + + # Process dtype for index_col and drop from dtypes + if self.dtype is not None: + key, new_dtype = ( + (item, self.dtype.get(item)) + if self.dtype.get(item) is not None + else (frame.columns[item], self.dtype.get(frame.columns[item])) + ) + if new_dtype is not None: + frame[key] = frame[key].astype(new_dtype) + del self.dtype[key] + + frame.set_index(index_to_set, drop=True, inplace=True) + # Clear names if headerless and no name given + if self.header is None and not multi_index_named: + frame.index.names = [None] * len(frame.index.names) + + if self.dtype is not None: + # Ignore non-existent columns from dtype mapping + # like other parsers do + if isinstance(self.dtype, dict): + self.dtype = { + k: pandas_dtype(v) + for k, v in self.dtype.items() + if k in frame.columns + } + else: + self.dtype = pandas_dtype(self.dtype) + try: + frame = frame.astype(self.dtype) + except TypeError as e: + # GH#44901 reraise to keep api consistent + raise ValueError(e) + return frame + + def _validate_usecols(self, usecols) -> None: + if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols): + raise ValueError( + "The pyarrow engine does not allow 'usecols' to be integer " + "column positions. Pass a list of string column names instead." + ) + elif callable(usecols): + raise ValueError( + "The pyarrow engine does not allow 'usecols' to be a callable." + ) + + def read(self) -> DataFrame: + """ + Reads the contents of a CSV file into a DataFrame and + processes it according to the kwargs passed in the + constructor. + + Returns + ------- + DataFrame + The DataFrame created from the CSV file. 
+ """ + pa = import_optional_dependency("pyarrow") + pyarrow_csv = import_optional_dependency("pyarrow.csv") + self._get_pyarrow_options() + + try: + convert_options = pyarrow_csv.ConvertOptions(**self.convert_options) + except TypeError: + include = self.convert_options.get("include_columns", None) + if include is not None: + self._validate_usecols(include) + + nulls = self.convert_options.get("null_values", set()) + if not lib.is_list_like(nulls) or not all( + isinstance(x, str) for x in nulls + ): + raise TypeError( + "The 'pyarrow' engine requires all na_values to be strings" + ) + + raise + + try: + table = pyarrow_csv.read_csv( + self.src, + read_options=pyarrow_csv.ReadOptions(**self.read_options), + parse_options=pyarrow_csv.ParseOptions(**self.parse_options), + convert_options=convert_options, + ) + except pa.ArrowInvalid as e: + raise ParserError(e) from e + + dtype_backend = self.kwds["dtype_backend"] + + # Convert all pa.null() cols -> float64 (non nullable) + # else Int64 (nullable case, see below) + if dtype_backend is lib.no_default: + new_schema = table.schema + new_type = pa.float64() + for i, arrow_type in enumerate(table.schema.types): + if pa.types.is_null(arrow_type): + new_schema = new_schema.set( + i, new_schema.field(i).with_type(new_type) + ) + + table = table.cast(new_schema) + + if dtype_backend == "pyarrow": + frame = table.to_pandas(types_mapper=pd.ArrowDtype) + elif dtype_backend == "numpy_nullable": + # Modify the default mapping to also + # map null to Int64 (to match other engines) + dtype_mapping = _arrow_dtype_mapping() + dtype_mapping[pa.null()] = pd.Int64Dtype() + frame = table.to_pandas(types_mapper=dtype_mapping.get) + elif using_pyarrow_string_dtype(): + frame = table.to_pandas(types_mapper=arrow_string_types_mapper()) + + else: + frame = table.to_pandas() + return self._finalize_pandas_output(frame) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..09f0f2af8e5c6b55bff173ff74cd290fdf61cbae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py @@ -0,0 +1,1448 @@ +from __future__ import annotations + +from collections import defaultdict +from copy import copy +import csv +import datetime +from enum import Enum +import itertools +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +import pandas._libs.ops as libops +from pandas._libs.parsers import STR_NA_VALUES +from pandas._libs.tslibs import parsing +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + ParserError, + ParserWarning, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_dict_like, + is_extension_array_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas import ( + ArrowDtype, + DataFrame, + DatetimeIndex, + StringDtype, + concat, +) +from pandas.core import algorithms +from pandas.core.arrays import ( + ArrowExtensionArray, + 
BaseMaskedArray, + BooleanArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, +) +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.indexes.api import ( + Index, + MultiIndex, + default_index, + ensure_index_from_sequences, +) +from pandas.core.series import Series +from pandas.core.tools import datetimes as tools + +from pandas.io.common import is_potential_multi_index + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + Scalar, + ) + + +class ParserBase: + class BadLineHandleMethod(Enum): + ERROR = 0 + WARN = 1 + SKIP = 2 + + _implicit_index: bool + _first_chunk: bool + keep_default_na: bool + dayfirst: bool + cache_dates: bool + keep_date_col: bool + usecols_dtype: str | None + + def __init__(self, kwds) -> None: + self._implicit_index = False + + self.names = kwds.get("names") + self.orig_names: Sequence[Hashable] | None = None + + self.index_col = kwds.get("index_col", None) + self.unnamed_cols: set = set() + self.index_names: Sequence[Hashable] | None = None + self.col_names: Sequence[Hashable] | None = None + + self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False)) + self._parse_date_cols: Iterable = [] + self.date_parser = kwds.pop("date_parser", lib.no_default) + self.date_format = kwds.pop("date_format", None) + self.dayfirst = kwds.pop("dayfirst", False) + self.keep_date_col = kwds.pop("keep_date_col", False) + + self.na_values = kwds.get("na_values") + self.na_fvalues = kwds.get("na_fvalues") + self.na_filter = kwds.get("na_filter", False) + self.keep_default_na = kwds.get("keep_default_na", True) + + self.dtype = copy(kwds.get("dtype", None)) + self.converters = kwds.get("converters") + self.dtype_backend = kwds.get("dtype_backend") + + self.true_values = kwds.get("true_values") + self.false_values = kwds.get("false_values") + self.cache_dates = kwds.pop("cache_dates", True) + + self._date_conv = _make_date_converter( + date_parser=self.date_parser, + date_format=self.date_format, + dayfirst=self.dayfirst, + cache_dates=self.cache_dates, + ) + + # validate header options for mi + self.header = kwds.get("header") + if is_list_like(self.header, allow_sets=False): + if kwds.get("usecols"): + raise ValueError( + "cannot specify usecols when specifying a multi-index header" + ) + if kwds.get("names"): + raise ValueError( + "cannot specify names when specifying a multi-index header" + ) + + # validate index_col that only contains integers + if self.index_col is not None: + # In this case we can pin down index_col as list[int] + if is_integer(self.index_col): + self.index_col = [self.index_col] + elif not ( + is_list_like(self.index_col, allow_sets=False) + and all(map(is_integer, self.index_col)) + ): + raise ValueError( + "index_col must only contain row numbers " + "when specifying a multi-index header" + ) + else: + self.index_col = list(self.index_col) + + self._name_processed = False + + self._first_chunk = True + + self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"]) + + # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns) + # Normally, this arg would get pre-processed earlier on + self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR) + + def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable: + """ + Check if parse_dates are in columns. 
+ + If user has provided names for parse_dates, check if those columns + are available. + + Parameters + ---------- + columns : list + List of names of the dataframe. + + Returns + ------- + The names of the columns which will get parsed later if a dict or list + is given as specification. + + Raises + ------ + ValueError + If column to parse_date is not in dataframe. + + """ + cols_needed: Iterable + if is_dict_like(self.parse_dates): + cols_needed = itertools.chain(*self.parse_dates.values()) + elif is_list_like(self.parse_dates): + # a column in parse_dates could be represented + # ColReference = Union[int, str] + # DateGroups = List[ColReference] + # ParseDates = Union[DateGroups, List[DateGroups], + # Dict[ColReference, DateGroups]] + cols_needed = itertools.chain.from_iterable( + col if is_list_like(col) and not isinstance(col, tuple) else [col] + for col in self.parse_dates + ) + else: + cols_needed = [] + + cols_needed = list(cols_needed) + + # get only columns that are references using names (str), not by index + missing_cols = ", ".join( + sorted( + { + col + for col in cols_needed + if isinstance(col, str) and col not in columns + } + ) + ) + if missing_cols: + raise ValueError( + f"Missing column provided to 'parse_dates': '{missing_cols}'" + ) + # Convert positions to actual column names + return [ + col if (isinstance(col, str) or col in columns) else columns[col] + for col in cols_needed + ] + + def close(self) -> None: + pass + + @final + @property + def _has_complex_date_col(self) -> bool: + return isinstance(self.parse_dates, dict) or ( + isinstance(self.parse_dates, list) + and len(self.parse_dates) > 0 + and isinstance(self.parse_dates[0], list) + ) + + @final + def _should_parse_dates(self, i: int) -> bool: + if lib.is_bool(self.parse_dates): + return bool(self.parse_dates) + else: + if self.index_names is not None: + name = self.index_names[i] + else: + name = None + j = i if self.index_col is None else self.index_col[i] + + return (j in self.parse_dates) or ( + name is not None and name in self.parse_dates + ) + + @final + def _extract_multi_indexer_columns( + self, + header, + index_names: Sequence[Hashable] | None, + passed_names: bool = False, + ) -> tuple[ + Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool + ]: + """ + Extract and return the names, index_names, col_names if the column + names are a MultiIndex. 
+ + Parameters + ---------- + header: list of lists + The header rows + index_names: list, optional + The names of the future index + passed_names: bool, default False + A flag specifying if names where passed + + """ + if len(header) < 2: + return header[0], index_names, None, passed_names + + # the names are the tuples of the header that are not the index cols + # 0 is the name of the index, assuming index_col is a list of column + # numbers + ic = self.index_col + if ic is None: + ic = [] + + if not isinstance(ic, (list, tuple, np.ndarray)): + ic = [ic] + sic = set(ic) + + # clean the index_names + index_names = header.pop(-1) + index_names, _, _ = self._clean_index_names(index_names, self.index_col) + + # extract the columns + field_count = len(header[0]) + + # check if header lengths are equal + if not all(len(header_iter) == field_count for header_iter in header[1:]): + raise ParserError("Header rows must have an equal number of columns.") + + def extract(r): + return tuple(r[i] for i in range(field_count) if i not in sic) + + columns = list(zip(*(extract(r) for r in header))) + names = columns.copy() + for single_ic in sorted(ic): + names.insert(single_ic, single_ic) + + # Clean the column names (if we have an index_col). + if len(ic): + col_names = [ + r[ic[0]] + if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols) + else None + for r in header + ] + else: + col_names = [None] * len(header) + + passed_names = True + + return names, index_names, col_names, passed_names + + @final + def _maybe_make_multi_index_columns( + self, + columns: Sequence[Hashable], + col_names: Sequence[Hashable] | None = None, + ) -> Sequence[Hashable] | MultiIndex: + # possibly create a column mi here + if is_potential_multi_index(columns): + list_columns = cast(list[tuple], columns) + return MultiIndex.from_tuples(list_columns, names=col_names) + return columns + + @final + def _make_index( + self, data, alldata, columns, indexnamerow: list[Scalar] | None = None + ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]: + index: Index | None + if not is_index_col(self.index_col) or not self.index_col: + index = None + + elif not self._has_complex_date_col: + simple_index = self._get_simple_index(alldata, columns) + index = self._agg_index(simple_index) + elif self._has_complex_date_col: + if not self._name_processed: + (self.index_names, _, self.index_col) = self._clean_index_names( + list(columns), self.index_col + ) + self._name_processed = True + date_index = self._get_complex_date_index(data, columns) + index = self._agg_index(date_index, try_parse_dates=False) + + # add names for the index + if indexnamerow: + coffset = len(indexnamerow) - len(columns) + assert index is not None + index = index.set_names(indexnamerow[:coffset]) + + # maybe create a mi on the columns + columns = self._maybe_make_multi_index_columns(columns, self.col_names) + + return index, columns + + @final + def _get_simple_index(self, data, columns): + def ix(col): + if not isinstance(col, str): + return col + raise ValueError(f"Index {col} invalid") + + to_remove = [] + index = [] + for idx in self.index_col: + i = ix(idx) + to_remove.append(i) + index.append(data[i]) + + # remove index items from content and columns, don't pop in + # loop + for i in sorted(to_remove, reverse=True): + data.pop(i) + if not self._implicit_index: + columns.pop(i) + + return index + + @final + def _get_complex_date_index(self, data, col_names): + def _get_name(icol): + if isinstance(icol, str): + return icol + + if col_names is None: + 
raise ValueError(f"Must supply column order to use {icol!s} as index") + + for i, c in enumerate(col_names): + if i == icol: + return c + + to_remove = [] + index = [] + for idx in self.index_col: + name = _get_name(idx) + to_remove.append(name) + index.append(data[name]) + + # remove index items from content and columns, don't pop in + # loop + for c in sorted(to_remove, reverse=True): + data.pop(c) + col_names.remove(c) + + return index + + @final + def _clean_mapping(self, mapping): + """converts col numbers to names""" + if not isinstance(mapping, dict): + return mapping + clean = {} + # for mypy + assert self.orig_names is not None + + for col, v in mapping.items(): + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + clean[col] = v + if isinstance(mapping, defaultdict): + remaining_cols = set(self.orig_names) - set(clean.keys()) + clean.update({col: mapping[col] for col in remaining_cols}) + return clean + + @final + def _agg_index(self, index, try_parse_dates: bool = True) -> Index: + arrays = [] + converters = self._clean_mapping(self.converters) + + for i, arr in enumerate(index): + if try_parse_dates and self._should_parse_dates(i): + arr = self._date_conv( + arr, + col=self.index_names[i] if self.index_names is not None else None, + ) + + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() + + if isinstance(self.na_values, dict): + assert self.index_names is not None + col_name = self.index_names[i] + if col_name is not None: + col_na_values, col_na_fvalues = _get_na_values( + col_name, self.na_values, self.na_fvalues, self.keep_default_na + ) + + clean_dtypes = self._clean_mapping(self.dtype) + + cast_type = None + index_converter = False + if self.index_names is not None: + if isinstance(clean_dtypes, dict): + cast_type = clean_dtypes.get(self.index_names[i], None) + + if isinstance(converters, dict): + index_converter = converters.get(self.index_names[i]) is not None + + try_num_bool = not ( + cast_type and is_string_dtype(cast_type) or index_converter + ) + + arr, _ = self._infer_types( + arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool + ) + arrays.append(arr) + + names = self.index_names + index = ensure_index_from_sequences(arrays, names) + + return index + + @final + def _convert_to_ndarrays( + self, + dct: Mapping, + na_values, + na_fvalues, + verbose: bool = False, + converters=None, + dtypes=None, + ): + result = {} + for c, values in dct.items(): + conv_f = None if converters is None else converters.get(c, None) + if isinstance(dtypes, dict): + cast_type = dtypes.get(c, None) + else: + # single dtype or None + cast_type = dtypes + + if self.na_filter: + col_na_values, col_na_fvalues = _get_na_values( + c, na_values, na_fvalues, self.keep_default_na + ) + else: + col_na_values, col_na_fvalues = set(), set() + + if c in self._parse_date_cols: + # GH#26203 Do not convert columns which get converted to dates + # but replace nans to ensure to_datetime works + mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) + np.putmask(values, mask, np.nan) + result[c] = values + continue + + if conv_f is not None: + # conv_f applied to data before inference + if cast_type is not None: + warnings.warn( + ( + "Both a converter and dtype were specified " + f"for column {c} - only the converter will be used." 
+ ), + ParserWarning, + stacklevel=find_stack_level(), + ) + + try: + values = lib.map_infer(values, conv_f) + except ValueError: + mask = algorithms.isin(values, list(na_values)).view(np.uint8) + values = lib.map_infer_mask(values, conv_f, mask) + + cvals, na_count = self._infer_types( + values, + set(col_na_values) | col_na_fvalues, + cast_type is None, + try_num_bool=False, + ) + else: + is_ea = is_extension_array_dtype(cast_type) + is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type) + # skip inference if specified dtype is object + # or casting to an EA + try_num_bool = not (cast_type and is_str_or_ea_dtype) + + # general type inference and conversion + cvals, na_count = self._infer_types( + values, + set(col_na_values) | col_na_fvalues, + cast_type is None, + try_num_bool, + ) + + # type specified in dtype param or cast_type is an EA + if cast_type is not None: + cast_type = pandas_dtype(cast_type) + if cast_type and (cvals.dtype != cast_type or is_ea): + if not is_ea and na_count > 0: + if is_bool_dtype(cast_type): + raise ValueError(f"Bool column has NA values in column {c}") + cvals = self._cast_types(cvals, cast_type, c) + + result[c] = cvals + if verbose and na_count: + print(f"Filled {na_count} NA values in column {c!s}") + return result + + @final + def _set_noconvert_dtype_columns( + self, col_indices: list[int], names: Sequence[Hashable] + ) -> set[int]: + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. If usecols is specified, the positions of the columns + not to cast is relative to the usecols not to all columns. + + Parameters + ---------- + col_indices: The indices specifying order and positions of the columns + names: The column names which order is corresponding with the order + of col_indices + + Returns + ------- + A set of integers containing the positions of the columns not to convert. + """ + usecols: list[int] | list[str] | None + noconvert_columns = set() + if self.usecols_dtype == "integer": + # A set of integers will be converted to a list in + # the correct order every single time. + usecols = sorted(self.usecols) + elif callable(self.usecols) or self.usecols_dtype not in ("empty", None): + # The names attribute should have the correct columns + # in the proper order for indexing with parse_dates. + usecols = col_indices + else: + # Usecols is empty. 
+ usecols = None + + def _set(x) -> int: + if usecols is not None and is_integer(x): + x = usecols[x] + + if not is_integer(x): + x = col_indices[names.index(x)] + + return x + + if isinstance(self.parse_dates, list): + for val in self.parse_dates: + if isinstance(val, list): + for k in val: + noconvert_columns.add(_set(k)) + else: + noconvert_columns.add(_set(val)) + + elif isinstance(self.parse_dates, dict): + for val in self.parse_dates.values(): + if isinstance(val, list): + for k in val: + noconvert_columns.add(_set(k)) + else: + noconvert_columns.add(_set(val)) + + elif self.parse_dates: + if isinstance(self.index_col, list): + for k in self.index_col: + noconvert_columns.add(_set(k)) + elif self.index_col is not None: + noconvert_columns.add(_set(self.index_col)) + + return noconvert_columns + + @final + def _infer_types( + self, values, na_values, no_dtype_specified, try_num_bool: bool = True + ) -> tuple[ArrayLike, int]: + """ + Infer types of values, possibly casting + + Parameters + ---------- + values : ndarray + na_values : set + no_dtype_specified: Specifies if we want to cast explicitly + try_num_bool : bool, default try + try to cast values to numeric (first preference) or boolean + + Returns + ------- + converted : ndarray or ExtensionArray + na_count : int + """ + na_count = 0 + if issubclass(values.dtype.type, (np.number, np.bool_)): + # If our array has numeric dtype, we don't have to check for strings in isin + na_values = np.array([val for val in na_values if not isinstance(val, str)]) + mask = algorithms.isin(values, na_values) + na_count = mask.astype("uint8", copy=False).sum() + if na_count > 0: + if is_integer_dtype(values): + values = values.astype(np.float64) + np.putmask(values, mask, np.nan) + return values, na_count + + dtype_backend = self.dtype_backend + non_default_dtype_backend = ( + no_dtype_specified and dtype_backend is not lib.no_default + ) + result: ArrayLike + + if try_num_bool and is_object_dtype(values.dtype): + # exclude e.g DatetimeIndex here + try: + result, result_mask = lib.maybe_convert_numeric( + values, + na_values, + False, + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] + ) + except (ValueError, TypeError): + # e.g. 
encountering datetime string gets ValueError + # TypeError can be raised in floatify + na_count = parsers.sanitize_objects(values, na_values) + result = values + else: + if non_default_dtype_backend: + if result_mask is None: + result_mask = np.zeros(result.shape, dtype=np.bool_) + + if result_mask.all(): + result = IntegerArray( + np.ones(result_mask.shape, dtype=np.int64), result_mask + ) + elif is_integer_dtype(result): + result = IntegerArray(result, result_mask) + elif is_bool_dtype(result): + result = BooleanArray(result, result_mask) + elif is_float_dtype(result): + result = FloatingArray(result, result_mask) + + na_count = result_mask.sum() + else: + na_count = isna(result).sum() + else: + result = values + if values.dtype == np.object_: + na_count = parsers.sanitize_objects(values, na_values) + + if result.dtype == np.object_ and try_num_bool: + result, bool_mask = libops.maybe_convert_bool( + np.asarray(values), + true_values=self.true_values, + false_values=self.false_values, + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] + ) + if result.dtype == np.bool_ and non_default_dtype_backend: + if bool_mask is None: + bool_mask = np.zeros(result.shape, dtype=np.bool_) + result = BooleanArray(result, bool_mask) + elif result.dtype == np.object_ and non_default_dtype_backend: + # read_excel sends array of datetime objects + if not lib.is_datetime_array(result, skipna=True): + dtype = StringDtype() + cls = dtype.construct_array_type() + result = cls._from_sequence(values, dtype=dtype) + + if dtype_backend == "pyarrow": + pa = import_optional_dependency("pyarrow") + if isinstance(result, np.ndarray): + result = ArrowExtensionArray(pa.array(result, from_pandas=True)) + elif isinstance(result, BaseMaskedArray): + if result._mask.all(): + # We want an arrow null array here + result = ArrowExtensionArray(pa.array([None] * len(result))) + else: + result = ArrowExtensionArray( + pa.array(result._data, mask=result._mask) + ) + else: + result = ArrowExtensionArray( + pa.array(result.to_numpy(), from_pandas=True) + ) + + return result, na_count + + @final + def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike: + """ + Cast values to specified type + + Parameters + ---------- + values : ndarray or ExtensionArray + cast_type : np.dtype or ExtensionDtype + dtype to cast values to + column : string + column name - used only for error reporting + + Returns + ------- + converted : ndarray or ExtensionArray + """ + if isinstance(cast_type, CategoricalDtype): + known_cats = cast_type.categories is not None + + if not is_object_dtype(values.dtype) and not known_cats: + # TODO: this is for consistency with + # c-parser which parses all categories + # as strings + values = lib.ensure_string_array( + values, skipna=False, convert_na_value=False + ) + + cats = Index(values).unique().dropna() + values = Categorical._from_inferred_categories( + cats, cats.get_indexer(values), cast_type, true_values=self.true_values + ) + + # use the EA's implementation of casting + elif isinstance(cast_type, ExtensionDtype): + array_type = cast_type.construct_array_type() + try: + if isinstance(cast_type, BooleanDtype): + # error: Unexpected keyword argument "true_values" for + # "_from_sequence_of_strings" of "ExtensionArray" + return array_type._from_sequence_of_strings( # type: ignore[call-arg] + values, + dtype=cast_type, + true_values=self.true_values, + false_values=self.false_values, + ) + else: + return array_type._from_sequence_of_strings(values, 
dtype=cast_type) + except NotImplementedError as err: + raise NotImplementedError( + f"Extension Array: {array_type} must implement " + "_from_sequence_of_strings in order to be used in parser methods" + ) from err + + elif isinstance(values, ExtensionArray): + values = values.astype(cast_type, copy=False) + elif issubclass(cast_type.type, str): + # TODO: why skipna=True here and False above? some tests depend + # on it here, but nothing fails if we change it above + # (as no tests get there as of 2022-12-06) + values = lib.ensure_string_array( + values, skipna=True, convert_na_value=False + ) + else: + try: + values = astype_array(values, cast_type, copy=True) + except ValueError as err: + raise ValueError( + f"Unable to convert column {column} to type {cast_type}" + ) from err + return values + + @overload + def _do_date_conversions( + self, + names: Index, + data: DataFrame, + ) -> tuple[Sequence[Hashable] | Index, DataFrame]: + ... + + @overload + def _do_date_conversions( + self, + names: Sequence[Hashable], + data: Mapping[Hashable, ArrayLike], + ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: + ... + + @final + def _do_date_conversions( + self, + names: Sequence[Hashable] | Index, + data: Mapping[Hashable, ArrayLike] | DataFrame, + ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]: + # returns data, columns + + if self.parse_dates is not None: + data, names = _process_date_conversion( + data, + self._date_conv, + self.parse_dates, + self.index_col, + self.index_names, + names, + keep_date_col=self.keep_date_col, + dtype_backend=self.dtype_backend, + ) + + return names, data + + @final + def _check_data_length( + self, + columns: Sequence[Hashable], + data: Sequence[ArrayLike], + ) -> None: + """Checks if length of data is equal to length of column names. + + One set of trailing commas is allowed. self.index_col not False + results in a ParserError previously when lengths do not match. + + Parameters + ---------- + columns: list of column names + data: list of array-likes containing the data column-wise. + """ + if not self.index_col and len(columns) != len(data) and columns: + empty_str = is_object_dtype(data[-1]) and data[-1] == "" + # error: No overload variant of "__ror__" of "ndarray" matches + # argument type "ExtensionArray" + empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator] + if len(columns) == len(data) - 1 and np.all(empty_str_or_na): + return + warnings.warn( + "Length of header or names does not match length of data. This leads " + "to a loss of data with index_col=False.", + ParserWarning, + stacklevel=find_stack_level(), + ) + + @overload + def _evaluate_usecols( + self, + usecols: set[int] | Callable[[Hashable], object], + names: Sequence[Hashable], + ) -> set[int]: + ... + + @overload + def _evaluate_usecols( + self, usecols: set[str], names: Sequence[Hashable] + ) -> set[str]: + ... + + @final + def _evaluate_usecols( + self, + usecols: Callable[[Hashable], object] | set[str] | set[int], + names: Sequence[Hashable], + ) -> set[str] | set[int]: + """ + Check whether or not the 'usecols' parameter + is a callable. If so, enumerates the 'names' + parameter and returns a set of indices for + each entry in 'names' that evaluates to True. + If not a callable, returns 'usecols'. 
+ """ + if callable(usecols): + return {i for i, name in enumerate(names) if usecols(name)} + return usecols + + @final + def _validate_usecols_names(self, usecols, names: Sequence): + """ + Validates that all usecols are present in a given + list of names. If not, raise a ValueError that + shows what usecols are missing. + + Parameters + ---------- + usecols : iterable of usecols + The columns to validate are present in names. + names : iterable of names + The column names to check against. + + Returns + ------- + usecols : iterable of usecols + The `usecols` parameter if the validation succeeds. + + Raises + ------ + ValueError : Columns were missing. Error message will list them. + """ + missing = [c for c in usecols if c not in names] + if len(missing) > 0: + raise ValueError( + f"Usecols do not match columns, columns expected but not found: " + f"{missing}" + ) + + return usecols + + @final + def _validate_usecols_arg(self, usecols): + """ + Validate the 'usecols' parameter. + + Checks whether or not the 'usecols' parameter contains all integers + (column selection by index), strings (column by name) or is a callable. + Raises a ValueError if that is not the case. + + Parameters + ---------- + usecols : list-like, callable, or None + List of columns to use when parsing or a callable that can be used + to filter a list of table columns. + + Returns + ------- + usecols_tuple : tuple + A tuple of (verified_usecols, usecols_dtype). + + 'verified_usecols' is either a set if an array-like is passed in or + 'usecols' if a callable or None is passed in. + + 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like + is passed in or None if a callable or None is passed in. + """ + msg = ( + "'usecols' must either be list-like of all strings, all unicode, " + "all integers or a callable." + ) + if usecols is not None: + if callable(usecols): + return usecols, None + + if not is_list_like(usecols): + # see gh-20529 + # + # Ensure it is iterable container but not string. + raise ValueError(msg) + + usecols_dtype = lib.infer_dtype(usecols, skipna=False) + + if usecols_dtype not in ("empty", "integer", "string"): + raise ValueError(msg) + + usecols = set(usecols) + + return usecols, usecols_dtype + return usecols, None + + @final + def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]: + if not is_index_col(index_col): + return None, columns, index_col + + columns = list(columns) + + # In case of no rows and multiindex columns we have to set index_names to + # list of Nones GH#38292 + if not columns: + return [None] * len(index_col), columns, index_col + + cp_cols = list(columns) + index_names: list[str | int | None] = [] + + # don't mutate + index_col = list(index_col) + + for i, c in enumerate(index_col): + if isinstance(c, str): + index_names.append(c) + for j, name in enumerate(cp_cols): + if name == c: + index_col[i] = j + columns.remove(name) + break + else: + name = cp_cols[c] + columns.remove(name) + index_names.append(name) + + # Only clean index names that were placeholders. + for i, name in enumerate(index_names): + if isinstance(name, str) and name in self.unnamed_cols: + index_names[i] = None + + return index_names, columns, index_col + + @final + def _get_empty_meta(self, columns, dtype: DtypeArg | None = None): + columns = list(columns) + + index_col = self.index_col + index_names = self.index_names + + # Convert `dtype` to a defaultdict of some kind. 
+ # This will enable us to write `dtype[col_name]` + # without worrying about KeyError issues later on. + dtype_dict: defaultdict[Hashable, Any] + if not is_dict_like(dtype): + # if dtype == None, default will be object. + default_dtype = dtype or object + dtype_dict = defaultdict(lambda: default_dtype) + else: + dtype = cast(dict, dtype) + dtype_dict = defaultdict( + lambda: object, + {columns[k] if is_integer(k) else k: v for k, v in dtype.items()}, + ) + + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic empty Index. + index: Index + if (index_col is None or index_col is False) or index_names is None: + index = default_index(0) + else: + data = [Series([], dtype=dtype_dict[name]) for name in index_names] + index = ensure_index_from_sequences(data, names=index_names) + index_col.sort() + + for i, n in enumerate(index_col): + columns.pop(n - i) + + col_dict = { + col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns + } + + return index, columns, col_dict + + +def _make_date_converter( + date_parser=lib.no_default, + dayfirst: bool = False, + cache_dates: bool = True, + date_format: dict[Hashable, str] | str | None = None, +): + if date_parser is not lib.no_default: + warnings.warn( + "The argument 'date_parser' is deprecated and will " + "be removed in a future version. " + "Please use 'date_format' instead, or read your data in as 'object' dtype " + "and then call 'to_datetime'.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if date_parser is not lib.no_default and date_format is not None: + raise TypeError("Cannot use both 'date_parser' and 'date_format'") + + def unpack_if_single_element(arg): + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1: + return arg[0] + return arg + + def converter(*date_cols, col: Hashable): + if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm": + return date_cols[0] + + if date_parser is lib.no_default: + strs = parsing.concat_date_cols(date_cols) + date_fmt = ( + date_format.get(col) if isinstance(date_format, dict) else date_format + ) + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones will raise an error", + category=FutureWarning, + ) + str_objs = ensure_object(strs) + try: + result = tools.to_datetime( + str_objs, + format=date_fmt, + utc=False, + dayfirst=dayfirst, + cache=cache_dates, + ) + except (ValueError, TypeError): + # test_usecols_with_parse_dates4 + return str_objs + + if isinstance(result, DatetimeIndex): + arr = result.to_numpy() + arr.flags.writeable = True + return arr + return result._values + else: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + pre_parsed = date_parser( + *(unpack_if_single_element(arg) for arg in date_cols) + ) + try: + result = tools.to_datetime( + pre_parsed, + cache=cache_dates, + ) + except (ValueError, TypeError): + # test_read_csv_with_custom_date_parser + result = pre_parsed + if isinstance(result, datetime.datetime): + raise Exception("scalar parser") + 
return result + except Exception: + # e.g. test_datetime_fractional_seconds + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + pre_parsed = parsing.try_parse_dates( + parsing.concat_date_cols(date_cols), + parser=date_parser, + ) + try: + return tools.to_datetime(pre_parsed) + except (ValueError, TypeError): + # TODO: not reached in tests 2023-10-27; needed? + return pre_parsed + + return converter + + +parser_defaults = { + "delimiter": None, + "escapechar": None, + "quotechar": '"', + "quoting": csv.QUOTE_MINIMAL, + "doublequote": True, + "skipinitialspace": False, + "lineterminator": None, + "header": "infer", + "index_col": None, + "names": None, + "skiprows": None, + "skipfooter": 0, + "nrows": None, + "na_values": None, + "keep_default_na": True, + "true_values": None, + "false_values": None, + "converters": None, + "dtype": None, + "cache_dates": True, + "thousands": None, + "comment": None, + "decimal": ".", + # 'engine': 'c', + "parse_dates": False, + "keep_date_col": False, + "dayfirst": False, + "date_parser": lib.no_default, + "date_format": None, + "usecols": None, + # 'iterator': False, + "chunksize": None, + "verbose": False, + "encoding": None, + "compression": None, + "skip_blank_lines": True, + "encoding_errors": "strict", + "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR, + "dtype_backend": lib.no_default, +} + + +def _process_date_conversion( + data_dict, + converter: Callable, + parse_spec, + index_col, + index_names, + columns, + keep_date_col: bool = False, + dtype_backend=lib.no_default, +): + def _isindex(colspec): + return (isinstance(index_col, list) and colspec in index_col) or ( + isinstance(index_names, list) and colspec in index_names + ) + + new_cols = [] + new_data = {} + + orig_names = columns + columns = list(columns) + + date_cols = set() + + if parse_spec is None or isinstance(parse_spec, bool): + return data_dict, columns + + if isinstance(parse_spec, list): + # list of column lists + for colspec in parse_spec: + if is_scalar(colspec) or isinstance(colspec, tuple): + if isinstance(colspec, int) and colspec not in data_dict: + colspec = orig_names[colspec] + if _isindex(colspec): + continue + elif dtype_backend == "pyarrow": + import pyarrow as pa + + dtype = data_dict[colspec].dtype + if isinstance(dtype, ArrowDtype) and ( + pa.types.is_timestamp(dtype.pyarrow_dtype) + or pa.types.is_date(dtype.pyarrow_dtype) + ): + continue + + # Pyarrow engine returns Series which we need to convert to + # numpy array before converter, its a no-op for other parsers + data_dict[colspec] = converter( + np.asarray(data_dict[colspec]), col=colspec + ) + else: + new_name, col, old_names = _try_convert_dates( + converter, colspec, data_dict, orig_names + ) + if new_name in data_dict: + raise ValueError(f"New date column already in dict {new_name}") + new_data[new_name] = col + new_cols.append(new_name) + date_cols.update(old_names) + + elif isinstance(parse_spec, dict): + # dict of new name to column list + for new_name, colspec in parse_spec.items(): + if new_name in data_dict: + raise ValueError(f"Date column {new_name} already in dict") + + _, col, old_names = _try_convert_dates( + converter, + colspec, + data_dict, + orig_names, + target_name=new_name, + ) + + new_data[new_name] = col + + # If original column can be converted to date we keep the converted values + # This can only happen if values are from single column + if len(colspec) == 
1: + new_data[colspec[0]] = col + + new_cols.append(new_name) + date_cols.update(old_names) + + if isinstance(data_dict, DataFrame): + data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False) + else: + data_dict.update(new_data) + new_cols.extend(columns) + + if not keep_date_col: + for c in list(date_cols): + data_dict.pop(c) + new_cols.remove(c) + + return data_dict, new_cols + + +def _try_convert_dates( + parser: Callable, colspec, data_dict, columns, target_name: str | None = None +): + colset = set(columns) + colnames = [] + + for c in colspec: + if c in colset: + colnames.append(c) + elif isinstance(c, int) and c not in columns: + colnames.append(columns[c]) + else: + colnames.append(c) + + new_name: tuple | str + if all(isinstance(x, tuple) for x in colnames): + new_name = tuple(map("_".join, zip(*colnames))) + else: + new_name = "_".join([str(x) for x in colnames]) + to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict] + + new_col = parser(*to_parse, col=new_name if target_name is None else target_name) + return new_name, new_col, colnames + + +def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool): + """ + Get the NaN values for a given column. + + Parameters + ---------- + col : str + The name of the column. + na_values : array-like, dict + The object listing the NaN values as strings. + na_fvalues : array-like, dict + The object listing the NaN values as floats. + keep_default_na : bool + If `na_values` is a dict, and the column is not mapped in the + dictionary, whether to return the default NaN values or the empty set. + + Returns + ------- + nan_tuple : A length-two tuple composed of + + 1) na_values : the string NaN values for that column. + 2) na_fvalues : the float NaN values for that column. + """ + if isinstance(na_values, dict): + if col in na_values: + return na_values[col], na_fvalues[col] + else: + if keep_default_na: + return STR_NA_VALUES, set() + + return set(), set() + else: + return na_values, na_fvalues + + +def _validate_parse_dates_arg(parse_dates): + """ + Check whether or not the 'parse_dates' parameter + is a non-boolean scalar. Raises a ValueError if + that is the case. 
+ """ + msg = ( + "Only booleans, lists, and dictionaries are accepted " + "for the 'parse_dates' parameter" + ) + + if not ( + parse_dates is None + or lib.is_bool(parse_dates) + or isinstance(parse_dates, (list, dict)) + ): + raise TypeError(msg) + + return parse_dates + + +def is_index_col(col) -> bool: + return col is not None and col is not False diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..0cd788c5e57399597e3fe4ee1b1bf2af4bffd74b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py @@ -0,0 +1,410 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +from pandas.compat._optional import import_optional_dependency +from pandas.errors import DtypeWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.concat import ( + concat_compat, + union_categoricals, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core.indexes.api import ensure_index_from_sequences + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + ParserError, + is_index_col, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + ReadCsvBuffer, + ) + + from pandas import ( + Index, + MultiIndex, + ) + + +class CParserWrapper(ParserBase): + low_memory: bool + _reader: parsers.TextReader + + def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + kwds = kwds.copy() + + self.low_memory = kwds.pop("low_memory", False) + + # #2442 + # error: Cannot determine type of 'index_col' + kwds["allow_leading_cols"] = ( + self.index_col is not False # type: ignore[has-type] + ) + + # GH20529, validate usecol arg before TextReader + kwds["usecols"] = self.usecols + + # Have to pass int, would break tests using TextReader directly otherwise :( + kwds["on_bad_lines"] = self.on_bad_lines.value + + for key in ( + "storage_options", + "encoding", + "memory_map", + "compression", + ): + kwds.pop(key, None) + + kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None)) + if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default: + kwds["dtype_backend"] = "numpy" + if kwds["dtype_backend"] == "pyarrow": + # Fail here loudly instead of in cython after reading + import_optional_dependency("pyarrow") + self._reader = parsers.TextReader(src, **kwds) + + self.unnamed_cols = self._reader.unnamed_cols + + # error: Cannot determine type of 'names' + passed_names = self.names is None # type: ignore[has-type] + + if self._reader.header is None: + self.names = None + else: + # error: Cannot determine type of 'names' + # error: Cannot determine type of 'index_names' + ( + self.names, # type: ignore[has-type] + self.index_names, + self.col_names, + passed_names, + ) = self._extract_multi_indexer_columns( + self._reader.header, + self.index_names, # type: ignore[has-type] + passed_names, + ) + + # error: Cannot determine type of 'names' + if self.names is None: # type: ignore[has-type] + self.names = 
list(range(self._reader.table_width)) + + # gh-9755 + # + # need to set orig_names here first + # so that proper indexing can be done + # with _set_noconvert_columns + # + # once names has been filtered, we will + # then set orig_names again to names + # error: Cannot determine type of 'names' + self.orig_names = self.names[:] # type: ignore[has-type] + + if self.usecols: + usecols = self._evaluate_usecols(self.usecols, self.orig_names) + + # GH 14671 + # assert for mypy, orig_names is List or None, None would error in issubset + assert self.orig_names is not None + if self.usecols_dtype == "string" and not set(usecols).issubset( + self.orig_names + ): + self._validate_usecols_names(usecols, self.orig_names) + + # error: Cannot determine type of 'names' + if len(self.names) > len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self.names = [ # type: ignore[has-type] + n + # error: Cannot determine type of 'names' + for i, n in enumerate(self.names) # type: ignore[has-type] + if (i in usecols or n in usecols) + ] + + # error: Cannot determine type of 'names' + if len(self.names) < len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self._validate_usecols_names( + usecols, + self.names, # type: ignore[has-type] + ) + + # error: Cannot determine type of 'names' + self._validate_parse_dates_presence(self.names) # type: ignore[has-type] + self._set_noconvert_columns() + + # error: Cannot determine type of 'names' + self.orig_names = self.names # type: ignore[has-type] + + if not self._has_complex_date_col: + # error: Cannot determine type of 'index_col' + if self._reader.leading_cols == 0 and is_index_col( + self.index_col # type: ignore[has-type] + ): + self._name_processed = True + ( + index_names, + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + self.index_col, + ) = self._clean_index_names( + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + # error: Cannot determine type of 'index_col' + self.index_col, # type: ignore[has-type] + ) + + if self.index_names is None: + self.index_names = index_names + + if self._reader.header is None and not passed_names: + assert self.index_names is not None + self.index_names = [None] * len(self.index_names) + + self._implicit_index = self._reader.leading_cols > 0 + + def close(self) -> None: + # close handles opened by C parser + try: + self._reader.close() + except ValueError: + pass + + def _set_noconvert_columns(self) -> None: + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. 
+ """ + assert self.orig_names is not None + # error: Cannot determine type of 'names' + + # much faster than using orig_names.index(x) xref GH#44106 + names_dict = {x: i for i, x in enumerate(self.orig_names)} + col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type] + # error: Cannot determine type of 'names' + noconvert_columns = self._set_noconvert_dtype_columns( + col_indices, + self.names, # type: ignore[has-type] + ) + for col in noconvert_columns: + self._reader.set_noconvert(col) + + def read( + self, + nrows: int | None = None, + ) -> tuple[ + Index | MultiIndex | None, + Sequence[Hashable] | MultiIndex, + Mapping[Hashable, ArrayLike], + ]: + index: Index | MultiIndex | None + column_names: Sequence[Hashable] | MultiIndex + try: + if self.low_memory: + chunks = self._reader.read_low_memory(nrows) + # destructive to chunks + data = _concatenate_chunks(chunks) + + else: + data = self._reader.read(nrows) + except StopIteration: + if self._first_chunk: + self._first_chunk = False + names = dedup_names( + self.orig_names, + is_potential_multi_index(self.orig_names, self.index_col), + ) + index, columns, col_dict = self._get_empty_meta( + names, + dtype=self.dtype, + ) + columns = self._maybe_make_multi_index_columns(columns, self.col_names) + + if self.usecols is not None: + columns = self._filter_usecols(columns) + + col_dict = {k: v for k, v in col_dict.items() if k in columns} + + return index, columns, col_dict + + else: + self.close() + raise + + # Done with first read, next time raise StopIteration + self._first_chunk = False + + # error: Cannot determine type of 'names' + names = self.names # type: ignore[has-type] + + if self._reader.leading_cols: + if self._has_complex_date_col: + raise NotImplementedError("file structure not yet supported") + + # implicit index, no index names + arrays = [] + + if self.index_col and self._reader.leading_cols != len(self.index_col): + raise ParserError( + "Could not construct index. Requested to use " + f"{len(self.index_col)} number of columns, but " + f"{self._reader.leading_cols} left to parse." + ) + + for i in range(self._reader.leading_cols): + if self.index_col is None: + values = data.pop(i) + else: + values = data.pop(self.index_col[i]) + + values = self._maybe_parse_dates(values, i, try_parse_dates=True) + arrays.append(values) + + index = ensure_index_from_sequences(arrays) + + if self.usecols is not None: + names = self._filter_usecols(names) + + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + # rename dict keys + data_tups = sorted(data.items()) + data = {k: v for k, (i, v) in zip(names, data_tups)} + + column_names, date_data = self._do_date_conversions(names, data) + + # maybe create a mi on the columns + column_names = self._maybe_make_multi_index_columns( + column_names, self.col_names + ) + + else: + # rename dict keys + data_tups = sorted(data.items()) + + # ugh, mutation + + # assert for mypy, orig_names is List or None, None would error in list(...) 
+ assert self.orig_names is not None + names = list(self.orig_names) + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + if self.usecols is not None: + names = self._filter_usecols(names) + + # columns as list + alldata = [x[1] for x in data_tups] + if self.usecols is None: + self._check_data_length(names, alldata) + + data = {k: v for k, (i, v) in zip(names, data_tups)} + + names, date_data = self._do_date_conversions(names, data) + index, column_names = self._make_index(date_data, alldata, names) + + return index, column_names, date_data + + def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]: + # hackish + usecols = self._evaluate_usecols(self.usecols, names) + if usecols is not None and len(names) != len(usecols): + names = [ + name for i, name in enumerate(names) if i in usecols or name in usecols + ] + return names + + def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True): + if try_parse_dates and self._should_parse_dates(index): + values = self._date_conv( + values, + col=self.index_names[index] if self.index_names is not None else None, + ) + return values + + +def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: + """ + Concatenate chunks of data read with low_memory=True. + + The tricky part is handling Categoricals, where different chunks + may have different inferred categories. + """ + names = list(chunks[0].keys()) + warning_columns = [] + + result: dict = {} + for name in names: + arrs = [chunk.pop(name) for chunk in chunks] + # Check each arr for consistent types. + dtypes = {a.dtype for a in arrs} + non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} + + dtype = dtypes.pop() + if isinstance(dtype, CategoricalDtype): + result[name] = union_categoricals(arrs, sort_categories=False) + else: + result[name] = concat_compat(arrs) + if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object): + warning_columns.append(str(name)) + + if warning_columns: + warning_names = ",".join(warning_columns) + warning_message = " ".join( + [ + f"Columns ({warning_names}) have mixed types. " + f"Specify dtype option on import or set low_memory=False." + ] + ) + warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) + return result + + +def ensure_dtype_objs( + dtype: DtypeArg | dict[Hashable, DtypeArg] | None +) -> DtypeObj | dict[Hashable, DtypeObj] | None: + """ + Ensure we have either None, a dtype object, or a dictionary mapping to + dtype objects. 
+ """ + if isinstance(dtype, defaultdict): + # "None" not callable [misc] + default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc] + dtype_converted: defaultdict = defaultdict(lambda: default_dtype) + for key in dtype.keys(): + dtype_converted[key] = pandas_dtype(dtype[key]) + return dtype_converted + elif isinstance(dtype, dict): + return {k: pandas_dtype(dtype[k]) for k in dtype} + elif dtype is not None: + return pandas_dtype(dtype) + return dtype diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..79e7554a5744cf439a65e9fd1e18782a0fa71548 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py @@ -0,0 +1,1387 @@ +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import csv +from io import StringIO +import re +from typing import ( + IO, + TYPE_CHECKING, + DefaultDict, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.errors import ( + EmptyDataError, + ParserError, + ParserWarning, +) +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, + is_numeric_dtype, +) +from pandas.core.dtypes.inference import is_dict_like + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + parser_defaults, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + ReadCsvBuffer, + Scalar, + ) + + from pandas import ( + Index, + MultiIndex, + ) + +# BOM character (byte order mark) +# This exists at the beginning of a file to indicate endianness +# of a file (stream). Unfortunately, this marker screws up parsing, +# so we need to remove it if we see it. +_BOM = "\ufeff" + + +class PythonParser(ParserBase): + _no_thousands_columns: set[int] + + def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: + """ + Workhorse function for processing nested list into DataFrame + """ + super().__init__(kwds) + + self.data: Iterator[str] | None = None + self.buf: list = [] + self.pos = 0 + self.line_pos = 0 + + self.skiprows = kwds["skiprows"] + + if callable(self.skiprows): + self.skipfunc = self.skiprows + else: + self.skipfunc = lambda x: x in self.skiprows + + self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"]) + self.delimiter = kwds["delimiter"] + + self.quotechar = kwds["quotechar"] + if isinstance(self.quotechar, str): + self.quotechar = str(self.quotechar) + + self.escapechar = kwds["escapechar"] + self.doublequote = kwds["doublequote"] + self.skipinitialspace = kwds["skipinitialspace"] + self.lineterminator = kwds["lineterminator"] + self.quoting = kwds["quoting"] + self.skip_blank_lines = kwds["skip_blank_lines"] + + self.has_index_names = False + if "has_index_names" in kwds: + self.has_index_names = kwds["has_index_names"] + + self.verbose = kwds["verbose"] + + self.thousands = kwds["thousands"] + self.decimal = kwds["decimal"] + + self.comment = kwds["comment"] + + # Set self.data to something that can read lines. 
+ if isinstance(f, list): + # read_excel: f is a list + self.data = cast(Iterator[str], f) + else: + assert hasattr(f, "readline") + self.data = self._make_reader(f) + + # Get columns in two steps: infer from data, then + # infer column indices from self.usecols if it is specified. + self._col_indices: list[int] | None = None + columns: list[list[Scalar | None]] + ( + columns, + self.num_original_columns, + self.unnamed_cols, + ) = self._infer_columns() + + # Now self.columns has the set of columns that we will process. + # The original set is stored in self.original_columns. + # error: Cannot determine type of 'index_names' + ( + self.columns, + self.index_names, + self.col_names, + _, + ) = self._extract_multi_indexer_columns( + columns, + self.index_names, # type: ignore[has-type] + ) + + # get popped off for index + self.orig_names: list[Hashable] = list(self.columns) + + # needs to be cleaned/refactored + # multiple date column thing turning into a real spaghetti factory + + if not self._has_complex_date_col: + (index_names, self.orig_names, self.columns) = self._get_index_name() + self._name_processed = True + if self.index_names is None: + self.index_names = index_names + + if self._col_indices is None: + self._col_indices = list(range(len(self.columns))) + + self._parse_date_cols = self._validate_parse_dates_presence(self.columns) + self._no_thousands_columns = self._set_no_thousand_columns() + + if len(self.decimal) != 1: + raise ValueError("Only length-1 decimal markers supported") + + @cache_readonly + def num(self) -> re.Pattern: + decimal = re.escape(self.decimal) + if self.thousands is None: + regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$" + else: + thousands = re.escape(self.thousands) + regex = ( + rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?" + rf"([0-9]?(E|e)\-?[0-9]+)?$" + ) + return re.compile(regex) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]): + sep = self.delimiter + + if sep is None or len(sep) == 1: + if self.lineterminator: + raise ValueError( + "Custom line terminators not supported in python parser (yet)" + ) + + class MyDialect(csv.Dialect): + delimiter = self.delimiter + quotechar = self.quotechar + escapechar = self.escapechar + doublequote = self.doublequote + skipinitialspace = self.skipinitialspace + quoting = self.quoting + lineterminator = "\n" + + dia = MyDialect + + if sep is not None: + dia.delimiter = sep + else: + # attempt to sniff the delimiter from the first valid line, + # i.e. 
no comment line and not in skiprows + line = f.readline() + lines = self._check_comments([[line]])[0] + while self.skipfunc(self.pos) or not lines: + self.pos += 1 + line = f.readline() + lines = self._check_comments([[line]])[0] + lines_str = cast(list[str], lines) + + # since `line` was a string, lines will be a list containing + # only a single string + line = lines_str[0] + + self.pos += 1 + self.line_pos += 1 + sniffed = csv.Sniffer().sniff(line) + dia.delimiter = sniffed.delimiter + + # Note: encoding is irrelevant here + line_rdr = csv.reader(StringIO(line), dialect=dia) + self.buf.extend(list(line_rdr)) + + # Note: encoding is irrelevant here + reader = csv.reader(f, dialect=dia, strict=True) + + else: + + def _read(): + line = f.readline() + pat = re.compile(sep) + + yield pat.split(line.strip()) + + for line in f: + yield pat.split(line.strip()) + + reader = _read() + + return reader + + def read( + self, rows: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + try: + content = self._get_lines(rows) + except StopIteration: + if self._first_chunk: + content = [] + else: + self.close() + raise + + # done with first read, next time raise StopIteration + self._first_chunk = False + + columns: Sequence[Hashable] = list(self.orig_names) + if not len(content): # pragma: no cover + # DataFrame with the right metadata, even though it's length 0 + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + index, columns, col_dict = self._get_empty_meta( + names, + self.dtype, + ) + conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names) + return index, conv_columns, col_dict + + # handle new style for names in index + count_empty_content_vals = count_empty_vals(content[0]) + indexnamerow = None + if self.has_index_names and count_empty_content_vals == len(columns): + indexnamerow = content[0] + content = content[1:] + + alldata = self._rows_to_cols(content) + data, columns = self._exclude_implicit_index(alldata) + + conv_data = self._convert_data(data) + columns, conv_data = self._do_date_conversions(columns, conv_data) + + index, result_columns = self._make_index( + conv_data, alldata, columns, indexnamerow + ) + + return index, result_columns, conv_data + + def _exclude_implicit_index( + self, + alldata: list[np.ndarray], + ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + + offset = 0 + if self._implicit_index: + # error: Cannot determine type of 'index_col' + offset = len(self.index_col) # type: ignore[has-type] + + len_alldata = len(alldata) + self._check_data_length(names, alldata) + + return { + name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata + }, names + + # legacy + def get_chunk( + self, size: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + if size is None: + # error: "PythonParser" has no attribute "chunksize" + size = self.chunksize # type: ignore[attr-defined] + return self.read(rows=size) + + def _convert_data( + self, + data: Mapping[Hashable, np.ndarray], + ) -> Mapping[Hashable, ArrayLike]: + # apply converters + clean_conv = self._clean_mapping(self.converters) + 
clean_dtypes = self._clean_mapping(self.dtype) + + # Apply NA values. + clean_na_values = {} + clean_na_fvalues = {} + + if isinstance(self.na_values, dict): + for col in self.na_values: + na_value = self.na_values[col] + na_fvalue = self.na_fvalues[col] + + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + + clean_na_values[col] = na_value + clean_na_fvalues[col] = na_fvalue + else: + clean_na_values = self.na_values + clean_na_fvalues = self.na_fvalues + + return self._convert_to_ndarrays( + data, + clean_na_values, + clean_na_fvalues, + self.verbose, + clean_conv, + clean_dtypes, + ) + + @cache_readonly + def _have_mi_columns(self) -> bool: + if self.header is None: + return False + + header = self.header + if isinstance(header, (list, tuple, np.ndarray)): + return len(header) > 1 + else: + return False + + def _infer_columns( + self, + ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: + names = self.names + num_original_columns = 0 + clear_buffer = True + unnamed_cols: set[Scalar | None] = set() + + if self.header is not None: + header = self.header + have_mi_columns = self._have_mi_columns + + if isinstance(header, (list, tuple, np.ndarray)): + # we have a mi columns, so read an extra line + if have_mi_columns: + header = list(header) + [header[-1] + 1] + else: + header = [header] + + columns: list[list[Scalar | None]] = [] + for level, hr in enumerate(header): + try: + line = self._buffered_line() + + while self.line_pos <= hr: + line = self._next_line() + + except StopIteration as err: + if 0 < self.line_pos <= hr and ( + not have_mi_columns or hr != header[-1] + ): + # If no rows we want to raise a different message and if + # we have mi columns, the last line is not part of the header + joi = list(map(str, header[:-1] if have_mi_columns else header)) + msg = f"[{','.join(joi)}], len of {len(joi)}, " + raise ValueError( + f"Passed header={msg}" + f"but only {self.line_pos} lines in file" + ) from err + + # We have an empty file, so check + # if columns are provided. 
That will + # serve as the 'line' for parsing + if have_mi_columns and hr > 0: + if clear_buffer: + self._clear_buffer() + columns.append([None] * len(columns[-1])) + return columns, num_original_columns, unnamed_cols + + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + + this_columns: list[Scalar | None] = [] + this_unnamed_cols = [] + + for i, c in enumerate(line): + if c == "": + if have_mi_columns: + col_name = f"Unnamed: {i}_level_{level}" + else: + col_name = f"Unnamed: {i}" + + this_unnamed_cols.append(i) + this_columns.append(col_name) + else: + this_columns.append(c) + + if not have_mi_columns: + counts: DefaultDict = defaultdict(int) + # Ensure that regular columns are used before unnamed ones + # to keep given names and mangle unnamed columns + col_loop_order = [ + i + for i in range(len(this_columns)) + if i not in this_unnamed_cols + ] + this_unnamed_cols + + # TODO: Use pandas.io.common.dedup_names instead (see #50371) + for i in col_loop_order: + col = this_columns[i] + old_col = col + cur_count = counts[col] + + if cur_count > 0: + while cur_count > 0: + counts[old_col] = cur_count + 1 + col = f"{old_col}.{cur_count}" + if col in this_columns: + cur_count += 1 + else: + cur_count = counts[col] + + if ( + self.dtype is not None + and is_dict_like(self.dtype) + and self.dtype.get(old_col) is not None + and self.dtype.get(col) is None + ): + self.dtype.update({col: self.dtype.get(old_col)}) + this_columns[i] = col + counts[col] = cur_count + 1 + elif have_mi_columns: + # if we have grabbed an extra line, but its not in our + # format so save in the buffer, and create an blank extra + # line for the rest of the parsing code + if hr == header[-1]: + lc = len(this_columns) + # error: Cannot determine type of 'index_col' + sic = self.index_col # type: ignore[has-type] + ic = len(sic) if sic is not None else 0 + unnamed_count = len(this_unnamed_cols) + + # if wrong number of blanks or no index, not our format + if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: + clear_buffer = False + this_columns = [None] * lc + self.buf = [self.buf[-1]] + + columns.append(this_columns) + unnamed_cols.update({this_columns[i] for i in this_unnamed_cols}) + + if len(columns) == 1: + num_original_columns = len(this_columns) + + if clear_buffer: + self._clear_buffer() + + first_line: list[Scalar] | None + if names is not None: + # Read first row after header to check if data are longer + try: + first_line = self._next_line() + except StopIteration: + first_line = None + + len_first_data_row = 0 if first_line is None else len(first_line) + + if len(names) > len(columns[0]) and len(names) > len_first_data_row: + raise ValueError( + "Number of passed names did not match " + "number of header fields in the file" + ) + if len(columns) > 1: + raise TypeError("Cannot pass names with multi-index columns") + + if self.usecols is not None: + # Set _use_cols. We don't store columns because they are + # overwritten. 
+ self._handle_usecols(columns, names, num_original_columns) + else: + num_original_columns = len(names) + if self._col_indices is not None and len(names) != len( + self._col_indices + ): + columns = [[names[i] for i in sorted(self._col_indices)]] + else: + columns = [names] + else: + columns = self._handle_usecols( + columns, columns[0], num_original_columns + ) + else: + ncols = len(self._header_line) + num_original_columns = ncols + + if not names: + columns = [list(range(ncols))] + columns = self._handle_usecols(columns, columns[0], ncols) + elif self.usecols is None or len(names) >= ncols: + columns = self._handle_usecols([names], names, ncols) + num_original_columns = len(names) + elif not callable(self.usecols) and len(names) != len(self.usecols): + raise ValueError( + "Number of passed names did not match number of " + "header fields in the file" + ) + else: + # Ignore output but set used columns. + columns = [names] + self._handle_usecols(columns, columns[0], ncols) + + return columns, num_original_columns, unnamed_cols + + @cache_readonly + def _header_line(self): + # Store line for reuse in _get_index_name + if self.header is not None: + return None + + try: + line = self._buffered_line() + except StopIteration as err: + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + return line + + def _handle_usecols( + self, + columns: list[list[Scalar | None]], + usecols_key: list[Scalar | None], + num_original_columns: int, + ) -> list[list[Scalar | None]]: + """ + Sets self._col_indices + + usecols_key is used if there are string usecols. + """ + col_indices: set[int] | list[int] + if self.usecols is not None: + if callable(self.usecols): + col_indices = self._evaluate_usecols(self.usecols, usecols_key) + elif any(isinstance(u, str) for u in self.usecols): + if len(columns) > 1: + raise ValueError( + "If using multiple headers, usecols must be integers." + ) + col_indices = [] + + for col in self.usecols: + if isinstance(col, str): + try: + col_indices.append(usecols_key.index(col)) + except ValueError: + self._validate_usecols_names(self.usecols, usecols_key) + else: + col_indices.append(col) + else: + missing_usecols = [ + col for col in self.usecols if col >= num_original_columns + ] + if missing_usecols: + raise ParserError( + "Defining usecols with out-of-bounds indices is not allowed. " + f"{missing_usecols} are out-of-bounds.", + ) + col_indices = self.usecols + + columns = [ + [n for i, n in enumerate(column) if i in col_indices] + for column in columns + ] + self._col_indices = sorted(col_indices) + return columns + + def _buffered_line(self) -> list[Scalar]: + """ + Return a line from buffer, filling buffer if required. + """ + if len(self.buf) > 0: + return self.buf[0] + else: + return self._next_line() + + def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: + """ + Checks whether the file begins with the BOM character. + If it does, remove it. In addition, if there is quoting + in the field subsequent to the BOM, remove it as well + because it technically takes place at the beginning of + the name, not the middle of it. + """ + # first_row will be a list, so we need to check + # that that list is not empty before proceeding. + if not first_row: + return first_row + + # The first element of this row is the one that could have the + # BOM that we want to remove. Check that the first element is a + # string before proceeding. 
+ if not isinstance(first_row[0], str): + return first_row + + # Check that the string is not empty, as that would + # obviously not have a BOM at the start of it. + if not first_row[0]: + return first_row + + # Since the string is non-empty, check that it does + # in fact begin with a BOM. + first_elt = first_row[0][0] + if first_elt != _BOM: + return first_row + + first_row_bom = first_row[0] + new_row: str + + if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: + start = 2 + quote = first_row_bom[1] + end = first_row_bom[2:].index(quote) + 2 + + # Extract the data between the quotation marks + new_row = first_row_bom[start:end] + + # Extract any remaining data after the second + # quotation mark. + if len(first_row_bom) > end + 1: + new_row += first_row_bom[end + 1 :] + + else: + # No quotation so just remove BOM from first element + new_row = first_row_bom[1:] + + new_row_list: list[Scalar] = [new_row] + return new_row_list + first_row[1:] + + def _is_line_empty(self, line: list[Scalar]) -> bool: + """ + Check if a line is empty or not. + + Parameters + ---------- + line : str, array-like + The line of data to check. + + Returns + ------- + boolean : Whether or not the line is empty. + """ + return not line or all(not x for x in line) + + def _next_line(self) -> list[Scalar]: + if isinstance(self.data, list): + while self.skipfunc(self.pos): + if self.pos >= len(self.data): + break + self.pos += 1 + + while True: + try: + line = self._check_comments([self.data[self.pos]])[0] + self.pos += 1 + # either uncommented or blank to begin with + if not self.skip_blank_lines and ( + self._is_line_empty(self.data[self.pos - 1]) or line + ): + break + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + if ret: + line = ret[0] + break + except IndexError: + raise StopIteration + else: + while self.skipfunc(self.pos): + self.pos += 1 + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + next(self.data) + + while True: + orig_line = self._next_iter_line(row_num=self.pos + 1) + self.pos += 1 + + if orig_line is not None: + line = self._check_comments([orig_line])[0] + + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + + if ret: + line = ret[0] + break + elif self._is_line_empty(orig_line) or line: + break + + # This was the first line of the file, + # which could contain the BOM at the + # beginning of it. + if self.pos == 1: + line = self._check_for_bom(line) + + self.line_pos += 1 + self.buf.append(line) + return line + + def _alert_malformed(self, msg: str, row_num: int) -> None: + """ + Alert a user about a malformed row, depending on value of + `self.on_bad_lines` enum. + + If `self.on_bad_lines` is ERROR, the alert will be `ParserError`. + If `self.on_bad_lines` is WARN, the alert will be printed out. + + Parameters + ---------- + msg: str + The error message to display. + row_num: int + The row number where the parsing error occurred. + Because this row number is displayed, we 1-index, + even though we 0-index internally. + """ + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + raise ParserError(msg) + if self.on_bad_lines == self.BadLineHandleMethod.WARN: + warnings.warn( + f"Skipping line {row_num}: {msg}\n", + ParserWarning, + stacklevel=find_stack_level(), + ) + + def _next_iter_line(self, row_num: int) -> list[Scalar] | None: + """ + Wrapper around iterating through `self.data` (CSV source). 
+ + When a CSV error is raised, we check for specific + error messages that allow us to customize the + error message displayed to the user. + + Parameters + ---------- + row_num: int + The row number of the line being parsed. + """ + try: + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + line = next(self.data) + # for mypy + assert isinstance(line, list) + return line + except csv.Error as e: + if self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + msg = str(e) + + if "NULL byte" in msg or "line contains NUL" in msg: + msg = ( + "NULL byte detected. This byte " + "cannot be processed in Python's " + "native csv library at the moment, " + "so please pass in engine='c' instead" + ) + + if self.skipfooter > 0: + reason = ( + "Error could possibly be due to " + "parsing errors in the skipped footer rows " + "(the skipfooter keyword is only applied " + "after Python's csv library has parsed " + "all rows)." + ) + msg += ". " + reason + + self._alert_malformed(msg, row_num) + return None + + def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.comment is None: + return lines + ret = [] + for line in lines: + rl = [] + for x in line: + if ( + not isinstance(x, str) + or self.comment not in x + or x in self.na_values + ): + rl.append(x) + else: + x = x[: x.find(self.comment)] + if len(x) > 0: + rl.append(x) + break + ret.append(rl) + return ret + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Iterate through the lines and remove any that are + either empty or contain only one whitespace value + + Parameters + ---------- + lines : list of list of Scalars + The array of lines that we are to filter. + + Returns + ------- + filtered_lines : list of list of Scalars + The same array of lines with the "empty" ones removed. + """ + # Remove empty lines and lines with only one whitespace value + ret = [ + line + for line in lines + if ( + len(line) > 1 + or len(line) == 1 + and (not isinstance(line[0], str) or line[0].strip()) + ) + ] + return ret + + def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.thousands is None: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.thousands, replace="" + ) + + def _search_replace_num_columns( + self, lines: list[list[Scalar]], search: str, replace: str + ) -> list[list[Scalar]]: + ret = [] + for line in lines: + rl = [] + for i, x in enumerate(line): + if ( + not isinstance(x, str) + or search not in x + or i in self._no_thousands_columns + or not self.num.search(x.strip()) + ): + rl.append(x) + else: + rl.append(x.replace(search, replace)) + ret.append(rl) + return ret + + def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.decimal == parser_defaults["decimal"]: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.decimal, replace="." + ) + + def _clear_buffer(self) -> None: + self.buf = [] + + def _get_index_name( + self, + ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]: + """ + Try several cases to get lines: + + 0) There are headers on row 0 and row 1 and their + total summed lengths equals the length of the next line. + Treat row 0 as columns and row 1 as indices + 1) Look for implicit index: there are more columns + on row 1 than row 0. If this is true, assume that row + 1 lists index columns and row 0 lists normal columns. 
+ 2) Get index from the columns if it was listed. + """ + columns: Sequence[Hashable] = self.orig_names + orig_names = list(columns) + columns = list(columns) + + line: list[Scalar] | None + if self._header_line is not None: + line = self._header_line + else: + try: + line = self._next_line() + except StopIteration: + line = None + + next_line: list[Scalar] | None + try: + next_line = self._next_line() + except StopIteration: + next_line = None + + # implicitly index_col=0 b/c 1 fewer column names + implicit_first_cols = 0 + if line is not None: + # leave it 0, #2442 + # Case 1 + # error: Cannot determine type of 'index_col' + index_col = self.index_col # type: ignore[has-type] + if index_col is not False: + implicit_first_cols = len(line) - self.num_original_columns + + # Case 0 + if ( + next_line is not None + and self.header is not None + and index_col is not False + ): + if len(next_line) == len(line) + self.num_original_columns: + # column and index names on diff rows + self.index_col = list(range(len(line))) + self.buf = self.buf[1:] + + for c in reversed(line): + columns.insert(0, c) + + # Update list of original names to include all indices. + orig_names = list(columns) + self.num_original_columns = len(columns) + return line, orig_names, columns + + if implicit_first_cols > 0: + # Case 1 + self._implicit_index = True + if self.index_col is None: + self.index_col = list(range(implicit_first_cols)) + + index_name = None + + else: + # Case 2 + (index_name, _, self.index_col) = self._clean_index_names( + columns, self.index_col + ) + + return index_name, orig_names, columns + + def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: + col_len = self.num_original_columns + + if self._implicit_index: + col_len += len(self.index_col) + + max_len = max(len(row) for row in content) + + # Check that there are no rows with too many + # elements in their row (rows with too few + # elements are padded with NaN). + # error: Non-overlapping identity check (left operand type: "List[int]", + # right operand type: "Literal[False]") + if ( + max_len > col_len + and self.index_col is not False # type: ignore[comparison-overlap] + and self.usecols is None + ): + footers = self.skipfooter if self.skipfooter else 0 + bad_lines = [] + + iter_content = enumerate(content) + content_len = len(content) + content = [] + + for i, _content in iter_content: + actual_len = len(_content) + + if actual_len > col_len: + if callable(self.on_bad_lines): + new_l = self.on_bad_lines(_content) + if new_l is not None: + content.append(new_l) + elif self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + row_num = self.pos - (content_len - i + footers) + bad_lines.append((row_num, actual_len)) + + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + break + else: + content.append(_content) + + for row_num, actual_len in bad_lines: + msg = ( + f"Expected {col_len} fields in line {row_num + 1}, saw " + f"{actual_len}" + ) + if ( + self.delimiter + and len(self.delimiter) > 1 + and self.quoting != csv.QUOTE_NONE + ): + # see gh-13374 + reason = ( + "Error could possibly be due to quotes being " + "ignored when a multi-char delimiter is used." + ) + msg += ". 
" + reason + + self._alert_malformed(msg, row_num + 1) + + # see gh-13320 + zipped_content = list(lib.to_object_array(content, min_width=col_len).T) + + if self.usecols: + assert self._col_indices is not None + col_indices = self._col_indices + + if self._implicit_index: + zipped_content = [ + a + for i, a in enumerate(zipped_content) + if ( + i < len(self.index_col) + or i - len(self.index_col) in col_indices + ) + ] + else: + zipped_content = [ + a for i, a in enumerate(zipped_content) if i in col_indices + ] + return zipped_content + + def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]: + lines = self.buf + new_rows = None + + # already fetched some number + if rows is not None: + # we already have the lines in the buffer + if len(self.buf) >= rows: + new_rows, self.buf = self.buf[:rows], self.buf[rows:] + + # need some lines + else: + rows -= len(self.buf) + + if new_rows is None: + if isinstance(self.data, list): + if self.pos > len(self.data): + raise StopIteration + if rows is None: + new_rows = self.data[self.pos :] + new_pos = len(self.data) + else: + new_rows = self.data[self.pos : self.pos + rows] + new_pos = self.pos + rows + + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + self.pos = new_pos + + else: + new_rows = [] + try: + if rows is not None: + row_index = 0 + row_ct = 0 + offset = self.pos if self.pos is not None else 0 + while row_ct < rows: + # assert for mypy, data is Iterator[str] or None, would + # error in next + assert self.data is not None + new_row = next(self.data) + if not self.skipfunc(offset + row_index): + row_ct += 1 + row_index += 1 + new_rows.append(new_row) + + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + else: + rows = 0 + + while True: + next_row = self._next_iter_line(row_num=self.pos + rows + 1) + rows += 1 + + if next_row is not None: + new_rows.append(next_row) + len_new_rows = len(new_rows) + + except StopIteration: + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + if len(lines) == 0: + raise + self.pos += len_new_rows + + self.buf = [] + else: + lines = new_rows + + if self.skipfooter: + lines = lines[: -self.skipfooter] + + lines = self._check_comments(lines) + if self.skip_blank_lines: + lines = self._remove_empty_lines(lines) + lines = self._check_thousands(lines) + return self._check_decimal(lines) + + def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: + if self.skiprows: + return [ + row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos) + ] + return new_rows + + def _set_no_thousand_columns(self) -> set[int]: + no_thousands_columns: set[int] = set() + if self.columns and self.parse_dates: + assert self._col_indices is not None + no_thousands_columns = self._set_noconvert_dtype_columns( + self._col_indices, self.columns + ) + if self.columns and self.dtype: + assert self._col_indices is not None + for i, col in zip(self._col_indices, self.columns): + if not isinstance(self.dtype, dict) and not is_numeric_dtype( + self.dtype + ): + no_thousands_columns.add(i) + if ( + isinstance(self.dtype, dict) + and col in self.dtype + and ( + not is_numeric_dtype(self.dtype[col]) + or is_bool_dtype(self.dtype[col]) + ) + ): + no_thousands_columns.add(i) + return no_thousands_columns + + +class FixedWidthReader(abc.Iterator): + """ + A reader of fixed-width lines. 
+ """ + + def __init__( + self, + f: IO[str] | ReadCsvBuffer[str], + colspecs: list[tuple[int, int]] | Literal["infer"], + delimiter: str | None, + comment: str | None, + skiprows: set[int] | None = None, + infer_nrows: int = 100, + ) -> None: + self.f = f + self.buffer: Iterator | None = None + self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t " + self.comment = comment + if colspecs == "infer": + self.colspecs = self.detect_colspecs( + infer_nrows=infer_nrows, skiprows=skiprows + ) + else: + self.colspecs = colspecs + + if not isinstance(self.colspecs, (tuple, list)): + raise TypeError( + "column specifications must be a list or tuple, " + f"input was a {type(colspecs).__name__}" + ) + + for colspec in self.colspecs: + if not ( + isinstance(colspec, (tuple, list)) + and len(colspec) == 2 + and isinstance(colspec[0], (int, np.integer, type(None))) + and isinstance(colspec[1], (int, np.integer, type(None))) + ): + raise TypeError( + "Each column specification must be " + "2 element tuple or list of integers" + ) + + def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]: + """ + Read rows from self.f, skipping as specified. + + We distinguish buffer_rows (the first <= infer_nrows + lines) from the rows returned to detect_colspecs + because it's simpler to leave the other locations + with skiprows logic alone than to modify them to + deal with the fact we skipped some rows here as + well. + + Parameters + ---------- + infer_nrows : int + Number of rows to read from self.f, not counting + rows that are skipped. + skiprows: set, optional + Indices of rows to skip. + + Returns + ------- + detect_rows : list of str + A list containing the rows to read. + + """ + if skiprows is None: + skiprows = set() + buffer_rows = [] + detect_rows = [] + for i, row in enumerate(self.f): + if i not in skiprows: + detect_rows.append(row) + buffer_rows.append(row) + if len(detect_rows) >= infer_nrows: + break + self.buffer = iter(buffer_rows) + return detect_rows + + def detect_colspecs( + self, infer_nrows: int = 100, skiprows: set[int] | None = None + ) -> list[tuple[int, int]]: + # Regex escape the delimiters + delimiters = "".join([rf"\{x}" for x in self.delimiter]) + pattern = re.compile(f"([^{delimiters}]+)") + rows = self.get_rows(infer_nrows, skiprows) + if not rows: + raise EmptyDataError("No rows from which to infer column width") + max_len = max(map(len, rows)) + mask = np.zeros(max_len + 1, dtype=int) + if self.comment is not None: + rows = [row.partition(self.comment)[0] for row in rows] + for row in rows: + for m in pattern.finditer(row): + mask[m.start() : m.end()] = 1 + shifted = np.roll(mask, 1) + shifted[0] = 0 + edges = np.where((mask ^ shifted) == 1)[0] + edge_pairs = list(zip(edges[::2], edges[1::2])) + return edge_pairs + + def __next__(self) -> list[str]: + # Argument 1 to "next" has incompatible type "Union[IO[str], + # ReadCsvBuffer[str]]"; expected "SupportsNext[str]" + if self.buffer is not None: + try: + line = next(self.buffer) + except StopIteration: + self.buffer = None + line = next(self.f) # type: ignore[arg-type] + else: + line = next(self.f) # type: ignore[arg-type] + # Note: 'colspecs' is a sequence of half-open intervals. + return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs] + + +class FixedWidthFieldParser(PythonParser): + """ + Specialization that Converts fixed-width fields into DataFrames. + See PythonParser for details. 
+ """ + + def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None: + # Support iterators, convert to a list. + self.colspecs = kwds.pop("colspecs") + self.infer_nrows = kwds.pop("infer_nrows") + PythonParser.__init__(self, f, **kwds) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader: + return FixedWidthReader( + f, + self.colspecs, + self.delimiter, + self.comment, + self.skiprows, + self.infer_nrows, + ) + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Returns the list of lines without the empty ones. With fixed-width + fields, empty lines become arrays of empty strings. + + See PythonParser._remove_empty_lines. + """ + return [ + line + for line in lines + if any(not isinstance(e, str) or e.strip() for e in line) + ] + + +def count_empty_vals(vals) -> int: + return sum(1 for v in vals if v == "" or v is None) + + +def _validate_skipfooter_arg(skipfooter: int) -> int: + """ + Validate the 'skipfooter' parameter. + + Checks whether 'skipfooter' is a non-negative integer. + Raises a ValueError if that is not the case. + + Parameters + ---------- + skipfooter : non-negative integer + The number of rows to skip at the end of the file. + + Returns + ------- + validated_skipfooter : non-negative integer + The original input if the validation succeeds. + + Raises + ------ + ValueError : 'skipfooter' was not a non-negative integer. + """ + if not is_integer(skipfooter): + raise ValueError("skipfooter must be an integer") + + if skipfooter < 0: + raise ValueError("skipfooter cannot be negative") + + # Incompatible return value type (got "Union[int, integer[Any]]", expected "int") + return skipfooter # type: ignore[return-value] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/readers.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/readers.py new file mode 100644 index 0000000000000000000000000000000000000000..e04f27b56061030d19081d87439f0461fa53cc76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parsers/readers.py @@ -0,0 +1,2383 @@ +""" +Module contains tools for processing files into DataFrames or other objects + +GH#48849 provides a convenient way of deprecating keyword arguments +""" +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +import csv +import sys +from textwrap import fill +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypedDict, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import lib +from pandas._libs.parsers import STR_NA_VALUES +from pandas.errors import ( + AbstractMethodError, + ParserWarning, +) +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_file_like, + is_float, + is_hashable, + is_integer, + is_list_like, + pandas_dtype, +) + +from pandas import Series +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import RangeIndex +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + IOHandles, + get_handle, + stringify_path, + validate_header_arg, +) +from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper +from pandas.io.parsers.base_parser import ( + ParserBase, + is_index_col, + parser_defaults, +) +from pandas.io.parsers.c_parser_wrapper import CParserWrapper 
+from pandas.io.parsers.python_parser import ( + FixedWidthFieldParser, + PythonParser, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, + ) + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + CSVEngine, + DtypeArg, + DtypeBackend, + FilePath, + IndexLabel, + ReadCsvBuffer, + Self, + StorageOptions, + UsecolsArgType, + ) +_doc_read_csv_and_table = ( + r""" +{summary} + +Also supports optionally iterating or breaking of the file +into chunks. + +Additional help can be found in the online docs for +`IO Tools `_. + +Parameters +---------- +filepath_or_buffer : str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is + expected. A local file could be: file://localhost/path/to/table.csv. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, such as + a file handle (e.g. via builtin ``open`` function) or ``StringIO``. +sep : str, default {_default_sep} + Character or regex pattern to treat as the delimiter. If ``sep=None``, the + C engine cannot automatically detect + the separator, but the Python parsing engine can, meaning the latter will + be used and automatically detect the separator from only the first valid + row of the file by Python's builtin sniffer tool, ``csv.Sniffer``. + In addition, separators longer than 1 character and different from + ``'\s+'`` will be interpreted as regular expressions and will also force + the use of the Python parsing engine. Note that regex delimiters are prone + to ignoring quoted data. Regex example: ``'\r\t'``. +delimiter : str, optional + Alias for ``sep``. +header : int, Sequence of int, 'infer' or None, default 'infer' + Row number(s) containing column labels and marking the start of the + data (zero-indexed). Default behavior is to infer the column names: if no ``names`` + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly to ``names`` then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. The header can be a list of integers that + specify row locations for a :class:`~pandas.MultiIndex` on the columns + e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so ``header=0`` denotes the first line of + data rather than the first line of the file. +names : Sequence of Hashable, optional + Sequence of column labels to apply. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. +index_col : Hashable, Sequence of Hashable or False, optional + Column(s) to use as row label(s), denoted either by column labels or column + indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex` + will be formed for the row labels. + + Note: ``index_col=False`` can be used to force pandas to *not* use the first + column as the index, e.g., when you have a malformed file with delimiters at + the end of each line. 
+usecols : Sequence of Hashable or Callable, optional + Subset of columns to select, denoted either by column labels or column indices. + If list-like, all elements must either + be positional (i.e. integer indices into the document columns) or strings + that correspond to column names provided either by the user in ``names`` or + inferred from the document header row(s). If ``names`` are given, the document + header row(s) are not taken into account. For example, a valid list-like + ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. + Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. + To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order + preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` + for columns in ``['foo', 'bar']`` order or + ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` + for ``['bar', 'foo']`` order. + + If callable, the callable function will be evaluated against the column + names, returning names where the callable function evaluates to ``True``. An + example of a valid callable argument would be ``lambda x: x.upper() in + ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster + parsing time and lower memory usage. +dtype : dtype or dict of {{Hashable : dtype}}, optional + Data type(s) to apply to either the whole dataset or individual columns. + E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}`` + Use ``str`` or ``object`` together with suitable ``na_values`` settings + to preserve and not interpret ``dtype``. + If ``converters`` are specified, they will be applied INSTEAD + of ``dtype`` conversion. + + .. versionadded:: 1.5.0 + + Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where + the default determines the ``dtype`` of the columns which are not explicitly + listed. +engine : {{'c', 'python', 'pyarrow'}}, optional + Parser engine to use. The C and pyarrow engines are faster, while the python engine + is currently more feature-complete. Multithreading is currently only supported by + the pyarrow engine. + + .. versionadded:: 1.4.0 + + The 'pyarrow' engine was added as an *experimental* engine, and some features + are unsupported, or may not work correctly, with this engine. +converters : dict of {{Hashable : Callable}}, optional + Functions for converting values in specified columns. Keys can either + be column labels or column indices. +true_values : list, optional + Values to consider as ``True`` in addition to case-insensitive variants of 'True'. +false_values : list, optional + Values to consider as ``False`` in addition to case-insensitive variants of 'False'. +skipinitialspace : bool, default False + Skip spaces after delimiter. +skiprows : int, list of int or Callable, optional + Line numbers to skip (0-indexed) or number of lines to skip (``int``) + at the start of the file. + + If callable, the callable function will be evaluated against the row + indices, returning ``True`` if the row should be skipped and ``False`` otherwise. + An example of a valid callable argument would be ``lambda x: x in [0, 2]``. +skipfooter : int, default 0 + Number of lines at bottom of file to skip (Unsupported with ``engine='c'``). +nrows : int, optional + Number of rows of file to read. Useful for reading pieces of large files. +na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional + Additional strings to recognize as ``NA``/``NaN``. 
If ``dict`` passed, specific + per-column ``NA`` values. By default the following values are interpreted as + ``NaN``: " """ + + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ") + + """ ". + +keep_default_na : bool, default True + Whether or not to include the default ``NaN`` values when parsing the data. + Depending on whether ``na_values`` is passed in, the behavior is as follows: + + * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values`` + is appended to the default ``NaN`` values used for parsing. + * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only + the default ``NaN`` values are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only + the ``NaN`` values specified ``na_values`` are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no + strings will be parsed as ``NaN``. + + Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and + ``na_values`` parameters will be ignored. +na_filter : bool, default True + Detect missing value markers (empty strings and the value of ``na_values``). In + data without any ``NA`` values, passing ``na_filter=False`` can improve the + performance of reading a large file. +verbose : bool, default False + Indicate number of ``NA`` values placed in non-numeric columns. + + .. deprecated:: 2.2.0 +skip_blank_lines : bool, default True + If ``True``, skip over blank lines rather than interpreting as ``NaN`` values. +parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \ +default False + The behavior is as follows: + + * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to + ``True`` if ``date_format`` or ``date_parser`` arguments have been passed. + * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3 + each as a separate date column. + * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse + as a single date column. Values are joined with a space before parsing. + * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call + result 'foo'. Values are joined with a space before parsing. + + If a column or index cannot be represented as an array of ``datetime``, + say because of an unparsable value or a mixture of timezones, the column + or index will be returned unaltered as an ``object`` data type. For + non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after + :func:`~pandas.read_csv`. + + Note: A fast-path exists for iso8601-formatted dates. +infer_datetime_format : bool, default False + If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the + format of the ``datetime`` strings in the columns, and if it can be inferred, + switch to a faster method of parsing them. In some cases this can increase + the parsing speed by 5-10x. + + .. deprecated:: 2.0.0 + A strict version of this argument is now the default, passing it has no effect. + +keep_date_col : bool, default False + If ``True`` and ``parse_dates`` specifies combining multiple columns then + keep the original columns. +date_parser : Callable, optional + Function to use for converting a sequence of string columns to an array of + ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the + conversion. 
pandas will try to call ``date_parser`` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by ``parse_dates`` into a single array + and pass that; and 3) call ``date_parser`` once for each row using one or + more strings (corresponding to the columns defined by ``parse_dates``) as + arguments. + + .. deprecated:: 2.0.0 + Use ``date_format`` instead, or read in as ``object`` and then apply + :func:`~pandas.to_datetime` as-needed. +date_format : str or dict of column -> format, optional + Format to use for parsing dates when used in conjunction with ``parse_dates``. + The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See + `strftime documentation + `_ for more information on choices, though + note that :const:`"%f"` will parse all the way up to nanoseconds. + You can also pass: + + - "ISO8601", to parse any `ISO8601 `_ + time string (not necessarily in exactly the same format); + - "mixed", to infer the format for each element individually. This is risky, + and you should probably use it along with `dayfirst`. + + .. versionadded:: 2.0.0 +dayfirst : bool, default False + DD/MM format dates, international and European format. +cache_dates : bool, default True + If ``True``, use a cache of unique, converted dates to apply the ``datetime`` + conversion. May produce significant speed-up when parsing duplicate + date strings, especially ones with timezone offsets. + +iterator : bool, default False + Return ``TextFileReader`` object for iteration or getting chunks with + ``get_chunk()``. +chunksize : int, optional + Number of lines to read from the file per chunk. Passing a value will cause the + function to return a ``TextFileReader`` object for iteration. + See the `IO Tools docs + `_ + for more information on ``iterator`` and ``chunksize``. + +{decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + +thousands : str (length 1), optional + Character acting as the thousands separator in numerical values. +decimal : str (length 1), default '.' + Character to recognize as decimal point (e.g., use ',' for European data). +lineterminator : str (length 1), optional + Character used to denote a line break. Only valid with C parser. +quotechar : str (length 1), optional + Character used to denote the start and end of a quoted item. Quoted + items can include the ``delimiter`` and it will be ignored. +quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \ +3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL + Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is + ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special + characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``, + or ``lineterminator``. +doublequote : bool, default True + When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate + whether or not to interpret two consecutive ``quotechar`` elements INSIDE a + field as a single ``quotechar`` element. +escapechar : str (length 1), optional + Character used to escape other characters. +comment : str (length 1), optional + Character indicating that the remainder of line should not be parsed. + If found at the beginning + of a line, the line will be ignored altogether. This parameter must be a + single character. 
Like empty lines (as long as ``skip_blank_lines=True``), + fully commented lines are ignored by the parameter ``header`` but not by + ``skiprows``. For example, if ``comment='#'``, parsing + ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being + treated as the header. +encoding : str, optional, default 'utf-8' + Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python + standard encodings + `_ . + +encoding_errors : str, optional, default 'strict' + How encoding errors are treated. `List of possible values + `_ . + + .. versionadded:: 1.3.0 + +dialect : str or csv.Dialect, optional + If provided, this parameter will override values (default or not) for the + following parameters: ``delimiter``, ``doublequote``, ``escapechar``, + ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to + override values, a ``ParserWarning`` will be issued. See ``csv.Dialect`` + documentation for more details. +on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error' + Specifies what to do upon encountering a bad line (a line with too many fields). + Allowed values are : + + - ``'error'``, raise an Exception when a bad line is encountered. + - ``'warn'``, raise a warning when a bad line is encountered and skip that line. + - ``'skip'``, skip bad lines without raising or warning when they are encountered. + + .. versionadded:: 1.3.0 + + .. versionadded:: 1.4.0 + + - Callable, function with signature + ``(bad_line: list[str]) -> list[str] | None`` that will process a single + bad line. ``bad_line`` is a list of strings split by the ``sep``. + If the function returns ``None``, the bad line will be ignored. + If the function returns a new ``list`` of strings with more elements than + expected, a ``ParserWarning`` will be emitted while dropping extra elements. + Only supported when ``engine='python'`` + + .. versionchanged:: 2.2.0 + + - Callable, function with signature + as described in `pyarrow documentation + `_ when ``engine='pyarrow'`` + +delim_whitespace : bool, default False + Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be + used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option + is set to ``True``, nothing should be passed in for the ``delimiter`` + parameter. + + .. deprecated:: 2.2.0 + Use ``sep="\\s+"`` instead. +low_memory : bool, default True + Internally process the file in chunks, resulting in lower memory use + while parsing, but possibly mixed type inference. To ensure no mixed + types either set ``False``, or specify the type with the ``dtype`` parameter. + Note that the entire file is read into a single :class:`~pandas.DataFrame` + regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in + chunks. (Only valid with C parser). +memory_map : bool, default False + If a filepath is provided for ``filepath_or_buffer``, map the file object + directly onto memory and access the data directly from there. Using this + option can improve performance because there is no longer any I/O overhead. +float_precision : {{'high', 'legacy', 'round_trip'}}, optional + Specifies which converter the C engine should use for floating-point + values. The options are ``None`` or ``'high'`` for the ordinary converter, + ``'legacy'`` for the original lower precision pandas converter, and + ``'round_trip'`` for the round-trip converter. 
+ +{storage_options} + +dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + +Returns +------- +DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. + +See Also +-------- +DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. +{see_also_func_name} : {see_also_func_summary} +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Examples +-------- +>>> pd.{func_name}('data.csv') # doctest: +SKIP +""" +) + + +class _C_Parser_Defaults(TypedDict): + delim_whitespace: Literal[False] + na_filter: Literal[True] + low_memory: Literal[True] + memory_map: Literal[False] + float_precision: None + + +_c_parser_defaults: _C_Parser_Defaults = { + "delim_whitespace": False, + "na_filter": True, + "low_memory": True, + "memory_map": False, + "float_precision": None, +} + + +class _Fwf_Defaults(TypedDict): + colspecs: Literal["infer"] + infer_nrows: Literal[100] + widths: None + + +_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None} +_c_unsupported = {"skipfooter"} +_python_unsupported = {"low_memory", "float_precision"} +_pyarrow_unsupported = { + "skipfooter", + "float_precision", + "chunksize", + "comment", + "nrows", + "thousands", + "memory_map", + "dialect", + "delim_whitespace", + "quoting", + "lineterminator", + "converters", + "iterator", + "dayfirst", + "verbose", + "skipinitialspace", + "low_memory", +} + + +class _DeprecationConfig(NamedTuple): + default_value: Any + msg: str | None + + +@overload +def validate_integer(name: str, val: None, min_val: int = ...) -> None: + ... + + +@overload +def validate_integer(name: str, val: float, min_val: int = ...) -> int: + ... + + +@overload +def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: + ... + + +def validate_integer( + name: str, val: int | float | None, min_val: int = 0 +) -> int | None: + """ + Checks whether the 'name' parameter for parsing is either + an integer OR float that can SAFELY be cast to an integer + without losing accuracy. Raises a ValueError if that is + not the case. + + Parameters + ---------- + name : str + Parameter name (used for error reporting) + val : int or float + The value to check + min_val : int + Minimum allowed value (val < min_val will result in a ValueError) + """ + if val is None: + return val + + msg = f"'{name:s}' must be an integer >={min_val:d}" + if is_float(val): + if int(val) != val: + raise ValueError(msg) + val = int(val) + elif not (is_integer(val) and val >= min_val): + raise ValueError(msg) + + return int(val) + + +def _validate_names(names: Sequence[Hashable] | None) -> None: + """ + Raise ValueError if the `names` parameter contains duplicates or has an + invalid data type. + + Parameters + ---------- + names : array-like or None + An array containing a list of the names used for the output DataFrame. + + Raises + ------ + ValueError + If names are not unique or are not ordered (e.g. set). 
+ """ + if names is not None: + if len(names) != len(set(names)): + raise ValueError("Duplicate names are not allowed.") + if not ( + is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView) + ): + raise ValueError("Names should be an ordered collection.") + + +def _read( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds +) -> DataFrame | TextFileReader: + """Generic reader of line files.""" + # if we pass a date_parser and parse_dates=False, we should not parse the + # dates GH#44366 + if kwds.get("parse_dates", None) is None: + if ( + kwds.get("date_parser", lib.no_default) is lib.no_default + and kwds.get("date_format", None) is None + ): + kwds["parse_dates"] = False + else: + kwds["parse_dates"] = True + + # Extract some of the arguments (pass chunksize on). + iterator = kwds.get("iterator", False) + chunksize = kwds.get("chunksize", None) + if kwds.get("engine") == "pyarrow": + if iterator: + raise ValueError( + "The 'iterator' option is not supported with the 'pyarrow' engine" + ) + + if chunksize is not None: + raise ValueError( + "The 'chunksize' option is not supported with the 'pyarrow' engine" + ) + else: + chunksize = validate_integer("chunksize", chunksize, 1) + + nrows = kwds.get("nrows", None) + + # Check for duplicates in names. + _validate_names(kwds.get("names", None)) + + # Create the parser. + parser = TextFileReader(filepath_or_buffer, **kwds) + + if chunksize or iterator: + return parser + + with parser: + return parser.read(nrows) + + +# iterator=True -> TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[True], + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
+ + +# chunksize=int -> TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
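+
+# Editor's sketch (not upstream pandas code): passing chunksize=<int> likewise
+# returns a TextFileReader, which yields DataFrames of at most `chunksize` rows
+# when iterated. Hedged illustration, assuming "data.csv" exists and `process`
+# stands in for user code:
+#
+#     import pandas as pd
+#
+#     for chunk in pd.read_csv("data.csv", chunksize=1000):
+#         process(chunk)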
+ + +# default case -> DataFrame +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
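+
+# Editor's sketch (not upstream pandas code): with neither iterator=True nor
+# chunksize, read_csv returns a DataFrame directly. A hedged illustration that
+# combines several keywords documented in the shared docstring; the file and
+# column names ("data.csv", "id", "when", "amount") are purely illustrative:
+#
+#     import pandas as pd
+#
+#     df = pd.read_csv(
+#         "data.csv",
+#         usecols=["id", "when", "amount"],
+#         dtype={"id": "Int64"},
+#         parse_dates=["when"],
+#         na_values=["n/a"],
+#     )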
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... 
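+
+# Editor's sketch (not upstream pandas code): the shared read_csv/read_table
+# docstring in this module describes a callable form of on_bad_lines for
+# engine="python", with signature (bad_line: list[str]) -> list[str] | None.
+# A hedged illustration that trims over-long rows to their first three fields
+# (the file name and field count are invented for the example):
+#
+#     import pandas as pd
+#
+#     df = pd.read_csv(
+#         "messy.csv",
+#         engine="python",
+#         on_bad_lines=lambda bad_line: bad_line[:3],
+#     )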
+ + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_csv", + summary="Read a comma-separated values (csv) file into DataFrame.", + see_also_func_name="read_table", + see_also_func_summary="Read general delimited file into DataFrame.", + _default_sep="','", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: UsecolsArgType = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool | lib.NoDefault = lib.no_default, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] | None = None, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool | lib.NoDefault = lib.no_default, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool | lib.NoDefault = lib.no_default, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: Literal["high", "legacy"] | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if keep_date_col is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'keep_date_col' keyword in pd.read_csv is deprecated and " + "will be removed in a future version. 
Explicitly remove unwanted " + "columns after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + keep_date_col = False + + if lib.is_list_like(parse_dates): + # GH#55569 + depr = False + # error: Item "bool" of "bool | Sequence[Hashable] | None" has no + # attribute "__iter__" (not iterable) + if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr] + depr = True + elif isinstance(parse_dates, dict) and any( + lib.is_list_like(x) for x in parse_dates.values() + ): + depr = True + if depr: + warnings.warn( + "Support for nested sequences for 'parse_dates' in pd.read_csv " + "is deprecated. Combine the desired columns with pd.to_datetime " + "after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. " + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if delim_whitespace is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'delim_whitespace' keyword in pd.read_csv is deprecated and " + "will be removed in a future version. Use ``sep='\\s+'`` instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + delim_whitespace = False + + if verbose is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'verbose' keyword in pd.read_csv is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + verbose = False + + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": ","}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +# iterator=True -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[True], + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., 
+ lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... + + +# chunksize=int -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
+ + +# default -> DataFrame +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
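+
+# Editor's sketch (not upstream pandas code): read_table shares read_csv's
+# machinery but defaults to a tab delimiter, so it is roughly equivalent to
+# read_csv(..., sep="\t"). Hedged illustration, assuming a tab-separated
+# "data.tsv" exists:
+#
+#     import pandas as pd
+#
+#     df = pd.read_table("data.tsv")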
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... + + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_table", + summary="Read general delimited file into DataFrame.", + see_also_func_name="read_csv", + see_also_func_summary=( + "Read a comma-separated values (csv) file into DataFrame." 
+ ), + _default_sep=r"'\\t' (tab-stop)", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: UsecolsArgType = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool | lib.NoDefault = lib.no_default, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] = False, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool | lib.NoDefault = lib.no_default, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool | lib.NoDefault = lib.no_default, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: str | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if keep_date_col is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'keep_date_col' keyword in pd.read_table is deprecated and " + "will be removed in a future version. Explicitly remove unwanted " + "columns after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + keep_date_col = False + + # error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__" + if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr] + # GH#55569 + warnings.warn( + "Support for nested sequences for 'parse_dates' in pd.read_table " + "is deprecated. Combine the desired columns with pd.to_datetime " + "after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. 
" + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if delim_whitespace is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'delim_whitespace' keyword in pd.read_table is deprecated and " + "will be removed in a future version. Use ``sep='\\s+'`` instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + delim_whitespace = False + + if verbose is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'verbose' keyword in pd.read_table is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + verbose = False + + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": "\t"}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[True], + chunksize: int | None = ..., + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: bool = ..., + chunksize: int, + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + **kwds, +) -> DataFrame: + ... + + +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = "infer", + widths: Sequence[int] | None = None, + infer_nrows: int = 100, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + iterator: bool = False, + chunksize: int | None = None, + **kwds, +) -> DataFrame | TextFileReader: + r""" + Read a table of fixed-width formatted lines into DataFrame. + + Also supports optionally iterating or breaking of the file + into chunks. + + Additional help can be found in the `online docs for IO Tools + `_. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a text ``read()`` function.The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.csv``. + colspecs : list of tuple (int, int) or 'infer'. optional + A list of tuples giving the extents of the fixed-width + fields of each line as half-open intervals (i.e., [from, to[ ). 
+ String value 'infer' can be used to instruct the parser to try + detecting the column specifications from the first 100 rows of + the data which are not being skipped via skiprows (default='infer'). + widths : list of int, optional + A list of field widths which can be used instead of 'colspecs' if + the intervals are contiguous. + infer_nrows : int, default 100 + The number of rows to consider when letting the parser determine the + `colspecs`. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + **kwds : optional + Optional keyword arguments can be passed to ``TextFileReader``. + + Returns + ------- + DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. + + See Also + -------- + DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. + read_csv : Read a comma-separated values (csv) file into DataFrame. + + Examples + -------- + >>> pd.read_fwf('data.csv') # doctest: +SKIP + """ + # Check input arguments. + if colspecs is None and widths is None: + raise ValueError("Must specify either colspecs or widths") + if colspecs not in (None, "infer") and widths is not None: + raise ValueError("You must specify only one of 'widths' and 'colspecs'") + + # Compute 'colspecs' from 'widths', if specified. + if widths is not None: + colspecs, col = [], 0 + for w in widths: + colspecs.append((col, col + w)) + col += w + + # for mypy + assert colspecs is not None + + # GH#40830 + # Ensure length of `colspecs` matches length of `names` + names = kwds.get("names") + if names is not None: + if len(names) != len(colspecs) and colspecs != "infer": + # need to check len(index_col) as it might contain + # unnamed indices, in which case it's name is not required + len_index = 0 + if kwds.get("index_col") is not None: + index_col: Any = kwds.get("index_col") + if index_col is not False: + if not is_list_like(index_col): + len_index = 1 + else: + len_index = len(index_col) + if kwds.get("usecols") is None and len(names) + len_index != len(colspecs): + # If usecols is used colspec may be longer than names + raise ValueError("Length of colspecs must match length of names") + + kwds["colspecs"] = colspecs + kwds["infer_nrows"] = infer_nrows + kwds["engine"] = "python-fwf" + kwds["iterator"] = iterator + kwds["chunksize"] = chunksize + + check_dtype_backend(dtype_backend) + kwds["dtype_backend"] = dtype_backend + return _read(filepath_or_buffer, kwds) + + +class TextFileReader(abc.Iterator): + """ + + Passed dialect overrides any of the related parser options + + """ + + def __init__( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, + engine: CSVEngine | None = None, + **kwds, + ) -> None: + if engine is not None: + engine_specified = True + else: + engine = "python" + engine_specified = False + self.engine = engine + self._engine_specified = kwds.get("engine_specified", engine_specified) + + _validate_skipfooter(kwds) + + dialect = _extract_dialect(kwds) + if dialect is not None: + if engine == "pyarrow": + raise ValueError( + "The 'dialect' option is not supported with the 'pyarrow' engine" + ) + kwds = _merge_with_dialect_properties(dialect, kwds) + + if 
kwds.get("header", "infer") == "infer": + kwds["header"] = 0 if kwds.get("names") is None else None + + self.orig_options = kwds + + # miscellanea + self._currow = 0 + + options = self._get_options_with_defaults(engine) + options["storage_options"] = kwds.get("storage_options", None) + + self.chunksize = options.pop("chunksize", None) + self.nrows = options.pop("nrows", None) + + self._check_file_or_buffer(f, engine) + self.options, self.engine = self._clean_options(options, engine) + + if "has_index_names" in kwds: + self.options["has_index_names"] = kwds["has_index_names"] + + self.handles: IOHandles | None = None + self._engine = self._make_engine(f, self.engine) + + def close(self) -> None: + if self.handles is not None: + self.handles.close() + self._engine.close() + + def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: + kwds = self.orig_options + + options = {} + default: object | None + + for argname, default in parser_defaults.items(): + value = kwds.get(argname, default) + + # see gh-12935 + if ( + engine == "pyarrow" + and argname in _pyarrow_unsupported + and value != default + and value != getattr(value, "value", default) + ): + raise ValueError( + f"The {repr(argname)} option is not supported with the " + f"'pyarrow' engine" + ) + options[argname] = value + + for argname, default in _c_parser_defaults.items(): + if argname in kwds: + value = kwds[argname] + + if engine != "c" and value != default: + # TODO: Refactor this logic, its pretty convoluted + if "python" in engine and argname not in _python_unsupported: + pass + elif "pyarrow" in engine and argname not in _pyarrow_unsupported: + pass + else: + raise ValueError( + f"The {repr(argname)} option is not supported with the " + f"{repr(engine)} engine" + ) + else: + value = default + options[argname] = value + + if engine == "python-fwf": + for argname, default in _fwf_defaults.items(): + options[argname] = kwds.get(argname, default) + + return options + + def _check_file_or_buffer(self, f, engine: CSVEngine) -> None: + # see gh-16530 + if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"): + # The C engine doesn't need the file-like to have the "__iter__" + # attribute. However, the Python engine needs "__iter__(...)" + # when iterating through such an object, meaning it + # needs to have that attribute + raise ValueError( + "The 'python' engine cannot iterate through this file buffer." 
+ ) + + def _clean_options( + self, options: dict[str, Any], engine: CSVEngine + ) -> tuple[dict[str, Any], CSVEngine]: + result = options.copy() + + fallback_reason = None + + # C engine not supported yet + if engine == "c": + if options["skipfooter"] > 0: + fallback_reason = "the 'c' engine does not support skipfooter" + engine = "python" + + sep = options["delimiter"] + delim_whitespace = options["delim_whitespace"] + + if sep is None and not delim_whitespace: + if engine in ("c", "pyarrow"): + fallback_reason = ( + f"the '{engine}' engine does not support " + "sep=None with delim_whitespace=False" + ) + engine = "python" + elif sep is not None and len(sep) > 1: + if engine == "c" and sep == r"\s+": + result["delim_whitespace"] = True + del result["delimiter"] + elif engine not in ("python", "python-fwf"): + # wait until regex engine integrated + fallback_reason = ( + f"the '{engine}' engine does not support " + "regex separators (separators > 1 char and " + r"different from '\s+' are interpreted as regex)" + ) + engine = "python" + elif delim_whitespace: + if "python" in engine: + result["delimiter"] = r"\s+" + elif sep is not None: + encodeable = True + encoding = sys.getfilesystemencoding() or "utf-8" + try: + if len(sep.encode(encoding)) > 1: + encodeable = False + except UnicodeDecodeError: + encodeable = False + if not encodeable and engine not in ("python", "python-fwf"): + fallback_reason = ( + f"the separator encoded in {encoding} " + f"is > 1 char long, and the '{engine}' engine " + "does not support such separators" + ) + engine = "python" + + quotechar = options["quotechar"] + if quotechar is not None and isinstance(quotechar, (str, bytes)): + if ( + len(quotechar) == 1 + and ord(quotechar) > 127 + and engine not in ("python", "python-fwf") + ): + fallback_reason = ( + "ord(quotechar) > 127, meaning the " + "quotechar is larger than one byte, " + f"and the '{engine}' engine does not support such quotechars" + ) + engine = "python" + + if fallback_reason and self._engine_specified: + raise ValueError(fallback_reason) + + if engine == "c": + for arg in _c_unsupported: + del result[arg] + + if "python" in engine: + for arg in _python_unsupported: + if fallback_reason and result[arg] != _c_parser_defaults.get(arg): + raise ValueError( + "Falling back to the 'python' engine because " + f"{fallback_reason}, but this causes {repr(arg)} to be " + "ignored as it is not supported by the 'python' engine." + ) + del result[arg] + + if fallback_reason: + warnings.warn( + ( + "Falling back to the 'python' engine because " + f"{fallback_reason}; you can avoid this warning by specifying " + "engine='python'." 
+ ), + ParserWarning, + stacklevel=find_stack_level(), + ) + + index_col = options["index_col"] + names = options["names"] + converters = options["converters"] + na_values = options["na_values"] + skiprows = options["skiprows"] + + validate_header_arg(options["header"]) + + if index_col is True: + raise ValueError("The value of index_col couldn't be 'True'") + if is_index_col(index_col): + if not isinstance(index_col, (list, tuple, np.ndarray)): + index_col = [index_col] + result["index_col"] = index_col + + names = list(names) if names is not None else names + + # type conversion-related + if converters is not None: + if not isinstance(converters, dict): + raise TypeError( + "Type converters must be a dict or subclass, " + f"input was a {type(converters).__name__}" + ) + else: + converters = {} + + # Converting values to NA + keep_default_na = options["keep_default_na"] + floatify = engine != "pyarrow" + na_values, na_fvalues = _clean_na_values( + na_values, keep_default_na, floatify=floatify + ) + + # handle skiprows; this is internally handled by the + # c-engine, so only need for python and pyarrow parsers + if engine == "pyarrow": + if not is_integer(skiprows) and skiprows is not None: + # pyarrow expects skiprows to be passed as an integer + raise ValueError( + "skiprows argument must be an integer when using " + "engine='pyarrow'" + ) + else: + if is_integer(skiprows): + skiprows = list(range(skiprows)) + if skiprows is None: + skiprows = set() + elif not callable(skiprows): + skiprows = set(skiprows) + + # put stuff back + result["names"] = names + result["converters"] = converters + result["na_values"] = na_values + result["na_fvalues"] = na_fvalues + result["skiprows"] = skiprows + + return result, engine + + def __next__(self) -> DataFrame: + try: + return self.get_chunk() + except StopIteration: + self.close() + raise + + def _make_engine( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO, + engine: CSVEngine = "c", + ) -> ParserBase: + mapping: dict[str, type[ParserBase]] = { + "c": CParserWrapper, + "python": PythonParser, + "pyarrow": ArrowParserWrapper, + "python-fwf": FixedWidthFieldParser, + } + if engine not in mapping: + raise ValueError( + f"Unknown engine: {engine} (valid options are {mapping.keys()})" + ) + if not isinstance(f, list): + # open file here + is_text = True + mode = "r" + if engine == "pyarrow": + is_text = False + mode = "rb" + elif ( + engine == "c" + and self.options.get("encoding", "utf-8") == "utf-8" + and isinstance(stringify_path(f), str) + ): + # c engine can decode utf-8 bytes, adding TextIOWrapper makes + # the c-engine especially for memory_map=True far slower + is_text = False + if "b" not in mode: + mode += "b" + self.handles = get_handle( + f, + mode, + encoding=self.options.get("encoding", None), + compression=self.options.get("compression", None), + memory_map=self.options.get("memory_map", False), + is_text=is_text, + errors=self.options.get("encoding_errors", "strict"), + storage_options=self.options.get("storage_options", None), + ) + assert self.handles is not None + f = self.handles.handle + + elif engine != "python": + msg = f"Invalid file path or buffer object type: {type(f)}" + raise ValueError(msg) + + try: + return mapping[engine](f, **self.options) + except Exception: + if self.handles is not None: + self.handles.close() + raise + + def _failover_to_python(self) -> None: + raise AbstractMethodError(self) + + def read(self, nrows: int | None = None) -> DataFrame: + if self.engine == "pyarrow": + try: 
+ # error: "ParserBase" has no attribute "read" + df = self._engine.read() # type: ignore[attr-defined] + except Exception: + self.close() + raise + else: + nrows = validate_integer("nrows", nrows) + try: + # error: "ParserBase" has no attribute "read" + ( + index, + columns, + col_dict, + ) = self._engine.read( # type: ignore[attr-defined] + nrows + ) + except Exception: + self.close() + raise + + if index is None: + if col_dict: + # Any column is actually fine: + new_rows = len(next(iter(col_dict.values()))) + index = RangeIndex(self._currow, self._currow + new_rows) + else: + new_rows = 0 + else: + new_rows = len(index) + + if hasattr(self, "orig_options"): + dtype_arg = self.orig_options.get("dtype", None) + else: + dtype_arg = None + + if isinstance(dtype_arg, dict): + dtype = defaultdict(lambda: None) # type: ignore[var-annotated] + dtype.update(dtype_arg) + elif dtype_arg is not None and pandas_dtype(dtype_arg) in ( + np.str_, + np.object_, + ): + dtype = defaultdict(lambda: dtype_arg) + else: + dtype = None + + if dtype is not None: + new_col_dict = {} + for k, v in col_dict.items(): + d = ( + dtype[k] + if pandas_dtype(dtype[k]) in (np.str_, np.object_) + else None + ) + new_col_dict[k] = Series(v, index=index, dtype=d, copy=False) + else: + new_col_dict = col_dict + + df = DataFrame( + new_col_dict, + columns=columns, + index=index, + copy=not using_copy_on_write(), + ) + + self._currow += new_rows + return df + + def get_chunk(self, size: int | None = None) -> DataFrame: + if size is None: + size = self.chunksize + if self.nrows is not None: + if self._currow >= self.nrows: + raise StopIteration + size = min(size, self.nrows - self._currow) + return self.read(nrows=size) + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +def TextParser(*args, **kwds) -> TextFileReader: + """ + Converts lists of lists/tuples into DataFrames with proper type inference + and optional (e.g. string to datetime) conversion. Also enables iterating + lazily over chunks of large files + + Parameters + ---------- + data : file-like object or list + delimiter : separator character to use + dialect : str or csv.Dialect instance, optional + Ignored if delimiter is longer than 1 character + names : sequence, default + header : int, default 0 + Row to use to parse column labels. Defaults to the first row. Prior + rows will be discarded + index_col : int or list, optional + Column or columns to use as the (possibly hierarchical) index + has_index_names: bool, default False + True if the cols defined in index_col have an index name and are + not in the header. + na_values : scalar, str, list-like, or dict, optional + Additional strings to recognize as NA/NaN. + keep_default_na : bool, default True + thousands : str, optional + Thousands separator + comment : str, optional + Comment out remainder of line + parse_dates : bool, default False + keep_date_col : bool, default False + date_parser : function, optional + + .. deprecated:: 2.0.0 + date_format : str or dict of column -> format, default ``None`` + + .. versionadded:: 2.0.0 + skiprows : list of integers + Row numbers to skip + skipfooter : int + Number of line at bottom of file to skip + converters : dict, optional + Dict of functions for converting values in certain columns. 
Keys can + either be integers or column labels, values are functions that take one + input argument, the cell (not column) content, and return the + transformed content. + encoding : str, optional + Encoding to use for UTF when reading/writing (ex. 'utf-8') + float_precision : str, optional + Specifies which converter the C engine should use for floating-point + values. The options are `None` or `high` for the ordinary converter, + `legacy` for the original lower precision pandas converter, and + `round_trip` for the round-trip converter. + """ + kwds["engine"] = "python" + return TextFileReader(*args, **kwds) + + +def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True): + na_fvalues: set | dict + if na_values is None: + if keep_default_na: + na_values = STR_NA_VALUES + else: + na_values = set() + na_fvalues = set() + elif isinstance(na_values, dict): + old_na_values = na_values.copy() + na_values = {} # Prevent aliasing. + + # Convert the values in the na_values dictionary + # into array-likes for further use. This is also + # where we append the default NaN values, provided + # that `keep_default_na=True`. + for k, v in old_na_values.items(): + if not is_list_like(v): + v = [v] + + if keep_default_na: + v = set(v) | STR_NA_VALUES + + na_values[k] = v + na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()} + else: + if not is_list_like(na_values): + na_values = [na_values] + na_values = _stringify_na_values(na_values, floatify) + if keep_default_na: + na_values = na_values | STR_NA_VALUES + + na_fvalues = _floatify_na_values(na_values) + + return na_values, na_fvalues + + +def _floatify_na_values(na_values): + # create float versions of the na_values + result = set() + for v in na_values: + try: + v = float(v) + if not np.isnan(v): + result.add(v) + except (TypeError, ValueError, OverflowError): + pass + return result + + +def _stringify_na_values(na_values, floatify: bool): + """return a stringified and numeric for these values""" + result: list[str | float] = [] + for x in na_values: + result.append(str(x)) + result.append(x) + try: + v = float(x) + + # we are like 999 here + if v == int(v): + v = int(v) + result.append(f"{v}.0") + result.append(str(v)) + + if floatify: + result.append(v) + except (TypeError, ValueError, OverflowError): + pass + if floatify: + try: + result.append(int(x)) + except (TypeError, ValueError, OverflowError): + pass + return set(result) + + +def _refine_defaults_read( + dialect: str | csv.Dialect | None, + delimiter: str | None | lib.NoDefault, + delim_whitespace: bool, + engine: CSVEngine | None, + sep: str | None | lib.NoDefault, + on_bad_lines: str | Callable, + names: Sequence[Hashable] | None | lib.NoDefault, + defaults: dict[str, Any], + dtype_backend: DtypeBackend | lib.NoDefault, +): + """Validate/refine default values of input parameters of read_csv, read_table. + + Parameters + ---------- + dialect : str or csv.Dialect + If provided, this parameter will override values (default or not) for the + following parameters: `delimiter`, `doublequote`, `escapechar`, + `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to + override values, a ParserWarning will be issued. See csv.Dialect + documentation for more details. + delimiter : str or object + Alias for sep. + delim_whitespace : bool + Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be + used as the sep. Equivalent to setting ``sep='\\s+'``. 
If this option + is set to True, nothing should be passed in for the ``delimiter`` + parameter. + + .. deprecated:: 2.2.0 + Use ``sep="\\s+"`` instead. + engine : {{'c', 'python'}} + Parser engine to use. The C engine is faster while the python engine is + currently more feature-complete. + sep : str or object + A delimiter provided by the user (str) or a sentinel value, i.e. + pandas._libs.lib.no_default. + on_bad_lines : str, callable + An option for handling bad lines or a sentinel value(None). + names : array-like, optional + List of column names to use. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. + defaults: dict + Default values of input parameters. + + Returns + ------- + kwds : dict + Input parameters with correct values. + + Raises + ------ + ValueError : + If a delimiter was specified with ``sep`` (or ``delimiter``) and + ``delim_whitespace=True``. + """ + # fix types for sep, delimiter to Union(str, Any) + delim_default = defaults["delimiter"] + kwds: dict[str, Any] = {} + # gh-23761 + # + # When a dialect is passed, it overrides any of the overlapping + # parameters passed in directly. We don't want to warn if the + # default parameters were passed in (since it probably means + # that the user didn't pass them in explicitly in the first place). + # + # "delimiter" is the annoying corner case because we alias it to + # "sep" before doing comparison to the dialect values later on. + # Thus, we need a flag to indicate that we need to "override" + # the comparison to dialect values by checking if default values + # for BOTH "delimiter" and "sep" were provided. + if dialect is not None: + kwds["sep_override"] = delimiter is None and ( + sep is lib.no_default or sep == delim_default + ) + + if delimiter and (sep is not lib.no_default): + raise ValueError("Specified a sep and a delimiter; you can only specify one.") + + kwds["names"] = None if names is lib.no_default else names + + # Alias sep -> delimiter. + if delimiter is None: + delimiter = sep + + if delim_whitespace and (delimiter is not lib.no_default): + raise ValueError( + "Specified a delimiter with both sep and " + "delim_whitespace=True; you can only specify one." + ) + + if delimiter == "\n": + raise ValueError( + r"Specified \n as separator or delimiter. This forces the python engine " + "which does not accept a line terminator. 
Hence it is not allowed to use " + "the line terminator as separator.", + ) + + if delimiter is lib.no_default: + # assign default separator value + kwds["delimiter"] = delim_default + else: + kwds["delimiter"] = delimiter + + if engine is not None: + kwds["engine_specified"] = True + else: + kwds["engine"] = "c" + kwds["engine_specified"] = False + + if on_bad_lines == "error": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR + elif on_bad_lines == "warn": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN + elif on_bad_lines == "skip": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP + elif callable(on_bad_lines): + if engine not in ["python", "pyarrow"]: + raise ValueError( + "on_bad_line can only be a callable function " + "if engine='python' or 'pyarrow'" + ) + kwds["on_bad_lines"] = on_bad_lines + else: + raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines") + + check_dtype_backend(dtype_backend) + + kwds["dtype_backend"] = dtype_backend + + return kwds + + +def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None: + """ + Extract concrete csv dialect instance. + + Returns + ------- + csv.Dialect or None + """ + if kwds.get("dialect") is None: + return None + + dialect = kwds["dialect"] + if dialect in csv.list_dialects(): + dialect = csv.get_dialect(dialect) + + _validate_dialect(dialect) + + return dialect + + +MANDATORY_DIALECT_ATTRS = ( + "delimiter", + "doublequote", + "escapechar", + "skipinitialspace", + "quotechar", + "quoting", +) + + +def _validate_dialect(dialect: csv.Dialect) -> None: + """ + Validate csv dialect instance. + + Raises + ------ + ValueError + If incorrect dialect is provided. + """ + for param in MANDATORY_DIALECT_ATTRS: + if not hasattr(dialect, param): + raise ValueError(f"Invalid dialect {dialect} provided") + + +def _merge_with_dialect_properties( + dialect: csv.Dialect, + defaults: dict[str, Any], +) -> dict[str, Any]: + """ + Merge default kwargs in TextFileReader with dialect parameters. + + Parameters + ---------- + dialect : csv.Dialect + Concrete csv dialect. See csv.Dialect documentation for more details. + defaults : dict + Keyword arguments passed to TextFileReader. + + Returns + ------- + kwds : dict + Updated keyword arguments, merged with dialect parameters. + """ + kwds = defaults.copy() + + for param in MANDATORY_DIALECT_ATTRS: + dialect_val = getattr(dialect, param) + + parser_default = parser_defaults[param] + provided = kwds.get(param, parser_default) + + # Messages for conflicting values between the dialect + # instance and the actual parameters provided. + conflict_msgs = [] + + # Don't warn if the default parameter was passed in, + # even if it conflicts with the dialect (gh-23761). + if provided not in (parser_default, dialect_val): + msg = ( + f"Conflicting values for '{param}': '{provided}' was " + f"provided, but the dialect specifies '{dialect_val}'. " + "Using the dialect-specified value." + ) + + # Annoying corner case for not warning about + # conflicts between dialect and delimiter parameter. + # Refer to the outer "_read_" function for more info. + if not (param == "delimiter" and kwds.pop("sep_override", False)): + conflict_msgs.append(msg) + + if conflict_msgs: + warnings.warn( + "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + ) + kwds[param] = dialect_val + return kwds + + +def _validate_skipfooter(kwds: dict[str, Any]) -> None: + """ + Check whether skipfooter is compatible with other kwargs in TextFileReader. 
+ + Parameters + ---------- + kwds : dict + Keyword arguments passed to TextFileReader. + + Raises + ------ + ValueError + If skipfooter is not compatible with other parameters. + """ + if kwds.get("skipfooter"): + if kwds.get("iterator") or kwds.get("chunksize"): + raise ValueError("'skipfooter' not supported for iteration") + if kwds.get("nrows"): + raise ValueError("'skipfooter' not supported with 'nrows'") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88ccab18facf3a2addcbd42988affc3db3a525f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dddbc2f4b5048970e2f6f86ee6da342e2b02831 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b87c8c544dab0621c25a8d26262f73def13757bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_api.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..60bcb97aaa3642be064bcacd130edf2084c4a55c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_api.py @@ -0,0 +1,383 @@ +from __future__ import annotations + +import pytest + +import pandas as pd +from pandas import api +import pandas._testing as tm +from pandas.api import ( + extensions as api_extensions, + indexers as api_indexers, + interchange as api_interchange, + types as api_types, + typing as api_typing, +) + + +class Base: + def check(self, namespace, expected, ignored=None): + # see which names are in the namespace, minus optional + # ignored ones + # compare vs the expected + + result = sorted( + f for f in dir(namespace) if not f.startswith("__") and f != "annotations" + ) + if ignored is not None: + result = sorted(set(result) - set(ignored)) + + expected = sorted(expected) + tm.assert_almost_equal(result, expected) + + +class TestPDApi(Base): + # these are optionally imported based on testing + # & need to be ignored + ignored = ["tests", "locale", "conftest", "_version_meson"] + + # top-level sub-packages + public_lib = [ + "api", + "arrays", + "options", + "test", + "testing", + "errors", + "plotting", + "io", + "tseries", + ] + private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"] + + # misc + misc = ["IndexSlice", "NaT", 
"NA"] + + # top-level classes + classes = [ + "ArrowDtype", + "Categorical", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "ExcelFile", + "ExcelWriter", + "Flags", + "Grouper", + "HDFStore", + "Index", + "MultiIndex", + "Period", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "Interval", + "IntervalIndex", + "CategoricalDtype", + "PeriodDtype", + "IntervalDtype", + "DatetimeTZDtype", + "BooleanDtype", + "Int8Dtype", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "UInt8Dtype", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "Float32Dtype", + "Float64Dtype", + "NamedAgg", + ] + + # these are already deprecated; awaiting removal + deprecated_classes: list[str] = [] + + # external modules exposed in pandas namespace + modules: list[str] = [] + + # top-level functions + funcs = [ + "array", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "interval_range", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "infer_freq", + "isna", + "isnull", + "lreshape", + "melt", + "notna", + "notnull", + "offsets", + "merge", + "merge_ordered", + "merge_asof", + "period_range", + "pivot", + "pivot_table", + "qcut", + "show_versions", + "timedelta_range", + "unique", + "value_counts", + "wide_to_long", + ] + + # top-level option funcs + funcs_option = [ + "reset_option", + "describe_option", + "get_option", + "option_context", + "set_option", + "set_eng_float_format", + ] + + # top-level read_* funcs + funcs_read = [ + "read_clipboard", + "read_csv", + "read_excel", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_xml", + "read_json", + "read_pickle", + "read_sas", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_feather", + "read_parquet", + "read_orc", + "read_spss", + ] + + # top-level json funcs + funcs_json = ["json_normalize"] + + # top-level to_* funcs + funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"] + + # top-level to deprecate in the future + deprecated_funcs_in_future: list[str] = [] + + # these are already deprecated; awaiting removal + deprecated_funcs: list[str] = [] + + # private modules in pandas namespace + private_modules = [ + "_config", + "_libs", + "_is_numpy_dev", + "_pandas_datetime_CAPI", + "_pandas_parser_CAPI", + "_testing", + "_typing", + ] + if not pd._built_with_meson: + private_modules.append("_version") + + def test_api(self): + checkthese = ( + self.public_lib + + self.private_lib + + self.misc + + self.modules + + self.classes + + self.funcs + + self.funcs_option + + self.funcs_read + + self.funcs_json + + self.funcs_to + + self.private_modules + ) + self.check(namespace=pd, expected=checkthese, ignored=self.ignored) + + def test_api_all(self): + expected = set( + self.public_lib + + self.misc + + self.modules + + self.classes + + self.funcs + + self.funcs_option + + self.funcs_read + + self.funcs_json + + self.funcs_to + ) - set(self.deprecated_classes) + actual = set(pd.__all__) + + extraneous = actual - expected + assert not extraneous + + missing = expected - actual + assert not missing + + def test_depr(self): + deprecated_list = ( + self.deprecated_classes + + self.deprecated_funcs + + self.deprecated_funcs_in_future + ) + for depr in deprecated_list: + with tm.assert_produces_warning(FutureWarning): + _ = getattr(pd, depr) + + +class TestApi(Base): + allowed_api_dirs = [ + "types", + "extensions", + "indexers", + "interchange", + "typing", 
+ ] + allowed_typing = [ + "DataFrameGroupBy", + "DatetimeIndexResamplerGroupby", + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "JsonReader", + "NaTType", + "NAType", + "PeriodIndexResamplerGroupby", + "Resampler", + "Rolling", + "RollingGroupby", + "SeriesGroupBy", + "StataReader", + "TimedeltaIndexResamplerGroupby", + "TimeGrouper", + "Window", + ] + allowed_api_types = [ + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dict_like", + "is_dtype_equal", + "is_extension_array_dtype", + "is_file_like", + "is_float", + "is_float_dtype", + "is_hashable", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_list_like", + "is_named_tuple", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scalar", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "pandas_dtype", + "infer_dtype", + "union_categoricals", + "CategoricalDtype", + "DatetimeTZDtype", + "IntervalDtype", + "PeriodDtype", + ] + allowed_api_interchange = ["from_dataframe", "DataFrame"] + allowed_api_indexers = [ + "check_array_indexer", + "BaseIndexer", + "FixedForwardWindowIndexer", + "VariableOffsetWindowIndexer", + ] + allowed_api_extensions = [ + "no_default", + "ExtensionDtype", + "register_extension_dtype", + "register_dataframe_accessor", + "register_index_accessor", + "register_series_accessor", + "take", + "ExtensionArray", + "ExtensionScalarOpsMixin", + ] + + def test_api(self): + self.check(api, self.allowed_api_dirs) + + def test_api_typing(self): + self.check(api_typing, self.allowed_typing) + + def test_api_types(self): + self.check(api_types, self.allowed_api_types) + + def test_api_interchange(self): + self.check(api_interchange, self.allowed_api_interchange) + + def test_api_indexers(self): + self.check(api_indexers, self.allowed_api_indexers) + + def test_api_extensions(self): + self.check(api_extensions, self.allowed_api_extensions) + + +class TestTesting(Base): + funcs = [ + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", + "assert_extension_array_equal", + ] + + def test_testing(self): + from pandas import testing + + self.check(testing, self.funcs) + + def test_util_in_top_level(self): + with pytest.raises(AttributeError, match="foo"): + pd.util.foo + + +def test_pandas_array_alias(): + msg = "PandasArray has been renamed NumpyExtensionArray" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = pd.arrays.PandasArray + + assert res is pd.arrays.NumpyExtensionArray diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_types.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_types.py new file mode 100644 index 0000000000000000000000000000000000000000..fbaa6e7e18bcaa9a574b741b5361818f1be01ecf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/api/test_types.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import pandas._testing as tm +from pandas.api import types +from pandas.tests.api.test_api import Base + + +class TestTypes(Base): + allowed = [ + "is_any_real_numeric_dtype", + "is_bool", + "is_bool_dtype", + 
"is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dtype_equal", + "is_float", + "is_float_dtype", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_scalar", + "is_sparse", + "is_string_dtype", + "is_signed_integer_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "is_period_dtype", + "is_interval", + "is_interval_dtype", + "is_re", + "is_re_compilable", + "is_dict_like", + "is_iterator", + "is_file_like", + "is_list_like", + "is_hashable", + "is_array_like", + "is_named_tuple", + "pandas_dtype", + "union_categoricals", + "infer_dtype", + "is_extension_array_dtype", + ] + deprecated: list[str] = [] + dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"] + + def test_types(self): + self.check(types, self.allowed + self.dtypes + self.deprecated) + + def test_deprecated_from_api_types(self): + for t in self.deprecated: + with tm.assert_produces_warning(FutureWarning): + getattr(types, t)(1) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py new file mode 100644 index 0000000000000000000000000000000000000000..d4c19a4970135cfb1865eaa0fae0845dc7d17971 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("ordered", [True, False]) +@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]]) +def test_factorize(categories, ordered): + cat = pd.Categorical( + ["b", "b", "a", "c", None], categories=categories, ordered=ordered + ) + codes, uniques = pd.factorize(cat) + expected_codes = np.array([0, 0, 1, 2, -1], dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a", "c"], categories=categories, ordered=ordered + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort(): + cat = pd.Categorical(["b", "b", None, "a"]) + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([1, 1, -1, 0], dtype=np.intp) + expected_uniques = pd.Categorical(["a", "b"]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort_ordered(): + cat = pd.Categorical( + ["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True + ) + + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([0, 0, -1, 1], dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a"], categories=["c", "b", "a"], ordered=True + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_isin_cats(): + # GH2003 + cat = pd.Categorical(["a", "b", np.nan]) + + result = cat.isin(["a", np.nan]) + expected = np.array([True, False, True], dtype=bool) + tm.assert_numpy_array_equal(expected, 
result) + + result = cat.isin(["a", "c"]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("value", [[""], [None, ""], [pd.NaT, ""]]) +def test_isin_cats_corner_cases(value): + # GH36550 + cat = pd.Categorical([""]) + result = cat.isin(value) + expected = np.array([True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("empty", [[], pd.Series(dtype=object), np.array([])]) +def test_isin_empty(empty): + s = pd.Categorical(["a", "b"]) + expected = np.array([False, False], dtype=bool) + + result = s.isin(empty) + tm.assert_numpy_array_equal(expected, result) + + +def test_diff(): + ser = pd.Series([1, 2, 3], dtype="category") + + msg = "Convert to a suitable dtype" + with pytest.raises(TypeError, match=msg): + ser.diff() + + df = ser.to_frame(name="A") + with pytest.raises(TypeError, match=msg): + df.diff() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c53fbc4637ed60dc92914f6e2ca74d5e0bdfe9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py @@ -0,0 +1,349 @@ +import re +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +from pandas import ( + Categorical, + CategoricalDtype, + DataFrame, + Index, + NaT, + Series, + date_range, +) +import pandas._testing as tm +from pandas.api.types import is_scalar + + +class TestCategoricalAnalytics: + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def test_min_max_not_ordered_raises(self, aggregation): + # unordered cats have no min/max + cat = Categorical(["a", "b", "c", "d"], ordered=False) + msg = f"Categorical is not ordered for operation {aggregation}" + agg_func = getattr(cat, aggregation) + + with pytest.raises(TypeError, match=msg): + agg_func() + + ufunc = np.minimum if aggregation == "min" else np.maximum + with pytest.raises(TypeError, match=msg): + ufunc.reduce(cat) + + def test_min_max_ordered(self, index_or_series_or_array): + cat = Categorical(["a", "b", "c", "d"], ordered=True) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + assert _min == "a" + assert _max == "d" + + assert np.minimum.reduce(obj) == "a" + assert np.maximum.reduce(obj) == "d" + # TODO: raises if we pass axis=0 (on Index and Categorical, not Series) + + cat = Categorical( + ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True + ) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + assert _min == "d" + assert _max == "a" + assert np.minimum.reduce(obj) == "d" + assert np.maximum.reduce(obj) == "a" + + def test_min_max_reduce(self): + # GH52788 + cat = Categorical(["a", "b", "c", "d"], ordered=True) + df = DataFrame(cat) + + result_max = df.agg("max") + expected_max = Series(Categorical(["d"], dtype=cat.dtype)) + tm.assert_series_equal(result_max, expected_max) + + result_min = df.agg("min") + expected_min = Series(Categorical(["a"], dtype=cat.dtype)) + tm.assert_series_equal(result_min, expected_min) + + @pytest.mark.parametrize( + "categories,expected", + [ + (list("ABC"), np.nan), + ([1, 2, 3], np.nan), + pytest.param( + Series(date_range("2020-01-01", periods=3), dtype="category"), + NaT, + marks=pytest.mark.xfail( + 
reason="https://github.com/pandas-dev/pandas/issues/29962" + ), + ), + ], + ) + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def test_min_max_ordered_empty(self, categories, expected, aggregation): + # GH 30227 + cat = Categorical([], categories=categories, ordered=True) + + agg_func = getattr(cat, aggregation) + result = agg_func() + assert result is expected + + @pytest.mark.parametrize( + "values, categories", + [(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])], + ) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("function", ["min", "max"]) + def test_min_max_with_nan(self, values, categories, function, skipna): + # GH 25303 + cat = Categorical(values, categories=categories, ordered=True) + result = getattr(cat, function)(skipna=skipna) + + if skipna is False: + assert result is np.nan + else: + expected = categories[0] if function == "min" else categories[2] + assert result == expected + + @pytest.mark.parametrize("function", ["min", "max"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_only_nan(self, function, skipna): + # https://github.com/pandas-dev/pandas/issues/33450 + cat = Categorical([np.nan], categories=[1, 2], ordered=True) + result = getattr(cat, function)(skipna=skipna) + assert result is np.nan + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numeric_only_min_max_raises(self, method): + # GH 25303 + cat = Categorical( + [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True + ) + with pytest.raises(TypeError, match=".* got an unexpected keyword"): + getattr(cat, method)(numeric_only=True) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_raises(self, method): + cat = Categorical(["a", "b", "c", "b"], ordered=False) + msg = ( + f"Categorical is not ordered for operation {method}\n" + "you can use .as_ordered() to change the Categorical to an ordered one" + ) + method = getattr(np, method) + with pytest.raises(TypeError, match=re.escape(msg)): + method(cat) + + @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"]) + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + msg = ( + f"the '{kwarg}' parameter is not supported in the pandas implementation " + f"of {method}" + ) + if kwarg == "axis": + msg = r"`axis` must be fewer than the number of dimensions \(1\)" + kwargs = {kwarg: 42} + method = getattr(np, method) + with pytest.raises(ValueError, match=msg): + method(cat, **kwargs) + + @pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")]) + def test_numpy_min_max_axis_equals_none(self, method, expected): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + method = getattr(np, method) + result = method(cat, axis=None) + assert result == expected + + @pytest.mark.parametrize( + "values,categories,exp_mode", + [ + ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]), + ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]), + ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]), + ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]), + ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ], + ) + def test_mode(self, values, categories, exp_mode): + cat = Categorical(values, categories=categories, ordered=True) + res = Series(cat).mode()._values + exp = Categorical(exp_mode, categories=categories, ordered=True) + 
tm.assert_categorical_equal(res, exp) + + def test_searchsorted(self, ordered): + # https://github.com/pandas-dev/pandas/issues/8420 + # https://github.com/pandas-dev/pandas/issues/14522 + + cat = Categorical( + ["cheese", "milk", "apple", "bread", "bread"], + categories=["cheese", "milk", "apple", "bread"], + ordered=ordered, + ) + ser = Series(cat) + + # Searching for single item argument, side='left' (default) + res_cat = cat.searchsorted("apple") + assert res_cat == 2 + assert is_scalar(res_cat) + + res_ser = ser.searchsorted("apple") + assert res_ser == 2 + assert is_scalar(res_ser) + + # Searching for single item array, side='left' (default) + res_cat = cat.searchsorted(["bread"]) + res_ser = ser.searchsorted(["bread"]) + exp = np.array([3], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for several items array, side='right' + res_cat = cat.searchsorted(["apple", "bread"], side="right") + res_ser = ser.searchsorted(["apple", "bread"], side="right") + exp = np.array([3, 5], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for a single value that is not from the Categorical + with pytest.raises(TypeError, match="cucumber"): + cat.searchsorted("cucumber") + with pytest.raises(TypeError, match="cucumber"): + ser.searchsorted("cucumber") + + # Searching for multiple values one of each is not from the Categorical + msg = ( + "Cannot setitem on a Categorical with a new category, " + "set the categories first" + ) + with pytest.raises(TypeError, match=msg): + cat.searchsorted(["bread", "cucumber"]) + with pytest.raises(TypeError, match=msg): + ser.searchsorted(["bread", "cucumber"]) + + def test_unique(self, ordered): + # GH38140 + dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered) + + # categories are reordered based on value when ordered=False + cat = Categorical(["a", "b", "c"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, cat) + + cat = Categorical(["a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype)) + + cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["c", "a", "b"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + # nan must be removed + cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + def test_unique_index_series(self, ordered): + # GH38140 + dtype = CategoricalDtype([3, 2, 1], ordered=ordered) + + c = Categorical([3, 1, 2, 2, 1], dtype=dtype) + # Categorical.unique sorts categories by appearance order + # if ordered=False + exp = Categorical([3, 1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + c = Categorical([1, 1, 2, 2], dtype=dtype) + exp = Categorical([1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + def test_shift(self): + # GH 9416 + cat = Categorical(["a", "b", "c", "d", "a"]) + + # shift forward + sp1 = cat.shift(1) + xp1 = Categorical([np.nan, "a", "b", "c", "d"]) + tm.assert_categorical_equal(sp1, xp1) + tm.assert_categorical_equal(cat[:-1], sp1[1:]) + + # shift 
back + sn2 = cat.shift(-2) + xp2 = Categorical( + ["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"] + ) + tm.assert_categorical_equal(sn2, xp2) + tm.assert_categorical_equal(cat[2:], sn2[:-2]) + + # shift by zero + tm.assert_categorical_equal(cat, cat.shift(0)) + + def test_nbytes(self): + cat = Categorical([1, 2, 3]) + exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories + assert cat.nbytes == exp + + def test_memory_usage(self): + cat = Categorical([1, 2, 3]) + + # .categories is an index, so we include the hashtable + assert 0 < cat.nbytes <= cat.memory_usage() + assert 0 < cat.nbytes <= cat.memory_usage(deep=True) + + cat = Categorical(["foo", "foo", "bar"]) + assert cat.memory_usage(deep=True) > cat.nbytes + + if not PYPY: + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = cat.memory_usage(deep=True) - sys.getsizeof(cat) + assert abs(diff) < 100 + + def test_map(self): + c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_categorical_equal(result, exp) + + c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("abc"), ordered=False) + tm.assert_categorical_equal(result, exp) + + result = c.map(lambda x: 1, na_action=None) + # GH 12766: Return an index not an array + tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64))) + + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_inplace_raises(self, value): + cat = Categorical(["A", "B", "B", "C", "A"]) + msg = ( + 'For argument "inplace" expected type bool, ' + f"received type {type(value).__name__}" + ) + + with pytest.raises(ValueError, match=msg): + cat.sort_values(inplace=value) + + def test_quantile_empty(self): + # make sure we have correct itemsize on resulting codes + cat = Categorical(["A", "B"]) + idx = Index([0.0, 0.5]) + result = cat[:0]._quantile(idx, interpolation="linear") + assert result._codes.dtype == np.int8 + + expected = cat.take([-1, -1], allow_fill=True) + tm.assert_extension_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..0eeb01b74608890daf81fef083adb29e797e57ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py @@ -0,0 +1,216 @@ +import collections + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + Series, + isna, +) +import pandas._testing as tm + + +class TestCategoricalMissing: + def test_isna(self): + exp = np.array([False, False, True]) + cat = Categorical(["a", "b", np.nan]) + res = cat.isna() + + tm.assert_numpy_array_equal(res, exp) + + def test_na_flags_int_categories(self): + # #1457 + + categories = list(range(10)) + labels = np.random.default_rng(2).integers(0, 10, 20) + labels[::5] = -1 + + cat = Categorical(labels, categories) + repr(cat) + + tm.assert_numpy_array_equal(isna(cat), labels == -1) + + def test_nan_handling(self): + # Nans are represented as -1 in codes + c = 
Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + c[1] = np.nan + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8)) + + # Adding nan to categories should make assigned nan point to the + # category! + c = Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + + def test_set_dtype_nans(self): + c = Categorical(["a", "b", np.nan]) + result = c._set_dtype(CategoricalDtype(["a", "c"])) + tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8")) + + def test_set_item_nan(self): + cat = Categorical([1, 2, 3]) + cat[1] = np.nan + + exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) + tm.assert_categorical_equal(cat, exp) + + @pytest.mark.parametrize( + "fillna_kwargs, msg", + [ + ( + {"value": 1, "method": "ffill"}, + "Cannot specify both 'value' and 'method'.", + ), + ({}, "Must specify a fill 'value' or 'method'."), + ({"method": "bad"}, "Invalid fill method. Expecting .* bad"), + ( + {"value": Series([1, 2, 3, 4, "a"])}, + "Cannot setitem on a Categorical with a new category", + ), + ], + ) + def test_fillna_raises(self, fillna_kwargs, msg): + # https://github.com/pandas-dev/pandas/issues/19682 + # https://github.com/pandas-dev/pandas/issues/13628 + cat = Categorical([1, 2, 3, None, None]) + + if len(fillna_kwargs) == 1 and "value" in fillna_kwargs: + err = TypeError + else: + err = ValueError + + with pytest.raises(err, match=msg): + cat.fillna(**fillna_kwargs) + + @pytest.mark.parametrize("named", [True, False]) + def test_fillna_iterable_category(self, named): + # https://github.com/pandas-dev/pandas/issues/21097 + if named: + Point = collections.namedtuple("Point", "x y") + else: + Point = lambda *args: args # tuple + cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object)) + result = cat.fillna(Point(0, 0)) + expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)]) + + tm.assert_categorical_equal(result, expected) + + # Case where the Point is not among our categories; we want ValueError, + # not NotImplementedError GH#41914 + cat = Categorical(np.array([Point(1, 0), Point(0, 1), None], dtype=object)) + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(Point(0, 0)) + + def test_fillna_array(self): + # accept Categorical or ndarray value if it holds appropriate values + cat = Categorical(["A", "B", "C", None, None]) + + other = cat.fillna("C") + result = cat.fillna(other) + tm.assert_categorical_equal(result, other) + assert isna(cat[-1]) # didn't modify original inplace + + other = np.array(["A", "B", "C", "B", "A"]) + result = cat.fillna(other) + expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype) + tm.assert_categorical_equal(result, expected) + assert isna(cat[-1]) # didn't modify original inplace + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + msg = "use_inf_as_na option is deprecated" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + cat = Categorical(values) + result = cat.isna() + tm.assert_numpy_array_equal(result, expected) + + result = Series(cat).isna() + expected = Series(expected) + tm.assert_series_equal(result, expected) + + result = DataFrame(cat).isna() + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na_outside_context(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + # Using isna directly for Categorical will fail in general here + cat = Categorical(values) + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = isna(cat) + tm.assert_numpy_array_equal(result, expected) + + result = isna(Series(cat)) + expected = Series(expected) + tm.assert_series_equal(result, expected) + + result = isna(DataFrame(cat)) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "a1, a2, categories", + [ + (["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]), + ([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]), + ], + ) + def test_compare_categorical_with_missing(self, a1, a2, categories): + # GH 28384 + cat_type = CategoricalDtype(categories) + + # != + result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type) + expected = Series(a1) != Series(a2) + tm.assert_series_equal(result, expected) + + # == + result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type) + expected = Series(a1) == Series(a2) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "na_value, dtype", + [ + (pd.NaT, "datetime64[ns]"), + (None, "float64"), + (np.nan, "float64"), + (pd.NA, "float64"), + ], + ) + def test_categorical_only_missing_values_no_cast(self, na_value, dtype): + # GH#44900 + result = Categorical([na_value, na_value]) + tm.assert_index_equal(result.categories, Index([], dtype=dtype)) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..3c677142846d73f7cfd08c6681ff0d7814b55bd1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py @@ -0,0 +1,111 @@ +import pytest + +import pandas as pd +from pandas import Categorical +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_replace,value,expected,flip_categories", + [ + # one-to-one + (1, 2, [2, 2, 3], False), + (1, 4, [4, 2, 3], False), + (4, 1, [1, 2, 3], False), + (5, 6, [1, 2, 3], False), + # many-to-one + ([1], 2, [2, 2, 3], False), + ([1, 2], 3, [3, 3, 3], False), + ([1, 2], 4, [4, 4, 3], False), + ((1, 2, 4), 5, [5, 5, 3], False), + ((5, 6), 2, [1, 2, 3], False), + ([1], [2], [2, 2, 3], False), + ([1, 4], [5, 2], [5, 2, 3], False), + # GH49404: overlap between to_replace and value + ([1, 2, 3], [2, 3, 4], [2, 3, 4], False), + # GH50872, GH46884: replace with null + (1, None, [None, 2, 3], False), + (1, pd.NA, [None, 2, 3], False), + # check_categorical sorts categories, which 
crashes on mixed dtypes + (3, "4", [1, 2, "4"], False), + ([1, 2, "3"], "5", ["5", "5", 3], True), + ], +) +@pytest.mark.filterwarnings( + "ignore:.*with CategoricalDtype is deprecated:FutureWarning" +) +def test_replace_categorical_series(to_replace, value, expected, flip_categories): + # GH 31720 + + ser = pd.Series([1, 2, 3], dtype="category") + result = ser.replace(to_replace, value) + expected = pd.Series(expected, dtype="category") + ser.replace(to_replace, value, inplace=True) + + if flip_categories: + expected = expected.cat.set_categories(expected.cat.categories[::-1]) + + tm.assert_series_equal(expected, result, check_category_order=False) + tm.assert_series_equal(expected, ser, check_category_order=False) + + +@pytest.mark.parametrize( + "to_replace, value, result, expected_error_msg", + [ + ("b", "c", ["a", "c"], "Categorical.categories are different"), + ("c", "d", ["a", "b"], None), + # https://github.com/pandas-dev/pandas/issues/33288 + ("a", "a", ["a", "b"], None), + ("b", None, ["a", None], "Categorical.categories length are different"), + ], +) +def test_replace_categorical(to_replace, value, result, expected_error_msg): + # GH#26988 + cat = Categorical(["a", "b"]) + expected = Categorical(result) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + warn = FutureWarning if expected_error_msg is not None else None + with tm.assert_produces_warning(warn, match=msg): + result = pd.Series(cat, copy=False).replace(to_replace, value)._values + + tm.assert_categorical_equal(result, expected) + if to_replace == "b": # the "c" test is supposed to be unchanged + with pytest.raises(AssertionError, match=expected_error_msg): + # ensure non-inplace call does not affect original + tm.assert_categorical_equal(cat, expected) + + ser = pd.Series(cat, copy=False) + with tm.assert_produces_warning(warn, match=msg): + ser.replace(to_replace, value, inplace=True) + tm.assert_categorical_equal(cat, expected) + + +def test_replace_categorical_ea_dtype(): + # GH49404 + cat = Categorical(pd.array(["a", "b"], dtype="string")) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values + expected = Categorical(pd.array(["c", pd.NA], dtype="string")) + tm.assert_categorical_equal(result, expected) + + +def test_replace_maintain_ordering(): + # GH51016 + dtype = pd.CategoricalDtype([0, 1, 2], ordered=True) + ser = pd.Series([0, 1, 2], dtype=dtype) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.replace(0, 2) + expected_dtype = pd.CategoricalDtype([1, 2], ordered=True) + expected = pd.Series([2, 1, 2], dtype=expected_dtype) + tm.assert_series_equal(expected, result, check_category_order=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0315130215cc762e2fad6fc07a97c9f4b94eb8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py @@ -0,0 +1,550 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas import ( + Categorical, + 
CategoricalDtype, + CategoricalIndex, + Index, + Series, + date_range, + option_context, + period_range, + timedelta_range, +) + + +class TestCategoricalReprWithFactor: + def test_print(self, using_infer_string): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + if using_infer_string: + expected = [ + "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']", + "Categories (3, string): [a < b < c]", + ] + else: + expected = [ + "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']", + "Categories (3, object): ['a' < 'b' < 'c']", + ] + expected = "\n".join(expected) + actual = repr(factor) + assert actual == expected + + +class TestCategoricalRepr: + def test_big_print(self): + codes = np.array([0, 1, 2, 0, 1, 2] * 100) + dtype = CategoricalDtype(categories=Index(["a", "b", "c"], dtype=object)) + factor = Categorical.from_codes(codes, dtype=dtype) + expected = [ + "['a', 'b', 'c', 'a', 'b', ..., 'b', 'c', 'a', 'b', 'c']", + "Length: 600", + "Categories (3, object): ['a', 'b', 'c']", + ] + expected = "\n".join(expected) + + actual = repr(factor) + + assert actual == expected + + def test_empty_print(self): + factor = Categorical([], Index(["a", "b", "c"], dtype=object)) + expected = "[], Categories (3, object): ['a', 'b', 'c']" + actual = repr(factor) + assert actual == expected + + assert expected == actual + factor = Categorical([], Index(["a", "b", "c"], dtype=object), ordered=True) + expected = "[], Categories (3, object): ['a' < 'b' < 'c']" + actual = repr(factor) + assert expected == actual + + factor = Categorical([], []) + expected = "[], Categories (0, object): []" + assert expected == repr(factor) + + def test_print_none_width(self): + # GH10087 + a = Series(Categorical([1, 2, 3, 4])) + exp = ( + "0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]" + ) + + with option_context("display.width", None): + assert exp == repr(a) + + @pytest.mark.skipif( + using_pyarrow_string_dtype(), + reason="Change once infer_string is set to True by default", + ) + def test_unicode_print(self): + c = Categorical(["aaaaa", "bb", "cccc"] * 20) + expected = """\ +['aaaaa', 'bb', 'cccc', 'aaaaa', 'bb', ..., 'bb', 'cccc', 'aaaaa', 'bb', 'cccc'] +Length: 60 +Categories (3, object): ['aaaaa', 'bb', 'cccc']""" + + assert repr(c) == expected + + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """\ +['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + # unicode option should not affect to Categorical, as it doesn't care + # the repr width + with option_context("display.unicode.east_asian_width", True): + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + def test_categorical_repr(self): + c = Categorical([1, 2, 3]) + exp = """[1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3]) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1, 2, 3, 4, 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64)) + exp = 
"""[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]""" + + assert repr(c) == exp + + def test_categorical_repr_ordered(self): + c = Categorical([1, 2, 3], ordered=True) + exp = """[1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1 < 2 < 3 < 4 < 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64), ordered=True) + exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]""" + + assert repr(c) == exp + + def test_categorical_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx) + + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + "" + ) + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + ) + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + c = Categorical(idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, " + "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, " + "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + def test_categorical_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 
12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_int_with_nan(self): + c = Categorical([1, 2, np.nan]) + c_exp = """[1, 2, NaN]\nCategories (2, int64): [1, 2]""" + assert repr(c) == c_exp + + s = Series([1, 2, np.nan], dtype="object").astype("category") + s_exp = """0 1\n1 2\n2 NaN +dtype: category +Categories (2, int64): [1, 2]""" + assert repr(s) == s_exp + + def test_categorical_repr_period(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = 
"""[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_index_repr(self): + idx = CategoricalIndex(Categorical([1, 2, 3])) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64))) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_ordered(self): + i = CategoricalIndex(Categorical([1, 2, 3], ordered=True)) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64), ordered=True)) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 
12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx), ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', + '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', + '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_period(self): + # test all length + idx = period_range("2011-01-01 09:00", freq="h", periods=1) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=2) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=3) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx))) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', + '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', + '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01', '2011-02', 
'2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_str_repr(self): + # GH 33676 + result = repr(Categorical([1, "2", 3, 4])) + expected = "[1, '2', 3, 4]\nCategories (4, object): [1, 3, 4, '2']" + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_array.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..96263f498935b0d975b12c74b7cd98c6c4853670 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_array.py @@ -0,0 +1,478 @@ +import datetime +import decimal +import re + +import numpy as np +import pytest +import pytz + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import register_extension_dtype +from pandas.arrays import ( + BooleanArray, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + SparseArray, + TimedeltaArray, +) +from pandas.core.arrays import ( + NumpyExtensionArray, + period_array, +) +from pandas.tests.extension.decimal import ( + DecimalArray, + DecimalDtype, + to_decimal, +) + + +@pytest.mark.parametrize("dtype_unit", ["M8[h]", "M8[m]", "m8[h]", "M8[m]"]) +def test_dt64_array(dtype_unit): + # PR 53817 + dtype_var = np.dtype(dtype_unit) + msg = ( + r"datetime64 and timedelta64 dtype resolutions other than " + r"'s', 'ms', 'us', and 'ns' are deprecated. " + r"In future releases passing unsupported resolutions will " + r"raise an exception." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=re.escape(msg)): + pd.array([], dtype=dtype_var) + + +@pytest.mark.parametrize( + "data, dtype, expected", + [ + # Basic NumPy defaults. + ([], None, FloatingArray._from_sequence([], dtype="Float64")), + ([1, 2], None, IntegerArray._from_sequence([1, 2], dtype="Int64")), + ([1, 2], object, NumpyExtensionArray(np.array([1, 2], dtype=object))), + ( + [1, 2], + np.dtype("float32"), + NumpyExtensionArray(np.array([1.0, 2.0], dtype=np.dtype("float32"))), + ), + ( + np.array([], dtype=object), + None, + NumpyExtensionArray(np.array([], dtype=object)), + ), + ( + np.array([1, 2], dtype="int64"), + None, + IntegerArray._from_sequence([1, 2], dtype="Int64"), + ), + ( + np.array([1.0, 2.0], dtype="float64"), + None, + FloatingArray._from_sequence([1.0, 2.0], dtype="Float64"), + ), + # String alias passes through to NumPy + ([1, 2], "float32", NumpyExtensionArray(np.array([1, 2], dtype="float32"))), + ([1, 2], "int64", NumpyExtensionArray(np.array([1, 2], dtype=np.int64))), + # GH#44715 FloatingArray does not support float16, so fall + # back to NumpyExtensionArray + ( + np.array([1, 2], dtype=np.float16), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.float16)), + ), + # idempotency with e.g. pd.array(pd.array([1, 2], dtype="int64")) + ( + NumpyExtensionArray(np.array([1, 2], dtype=np.int32)), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.int32)), + ), + # Period alias + ( + [pd.Period("2000", "D"), pd.Period("2001", "D")], + "Period[D]", + period_array(["2000", "2001"], freq="D"), + ), + # Period dtype + ( + [pd.Period("2000", "D")], + pd.PeriodDtype("D"), + period_array(["2000"], freq="D"), + ), + # Datetime (naive) + ( + [1, 2], + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence( + np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]" + ), + ), + ( + [1, 2], + np.dtype("datetime64[s]"), + DatetimeArray._from_sequence( + np.array([1, 2], dtype="M8[s]"), dtype="M8[s]" + ), + ), + ( + np.array([1, 2], dtype="datetime64[ns]"), + None, + DatetimeArray._from_sequence( + np.array([1, 2], dtype="M8[ns]"), dtype="M8[ns]" + ), + ), + ( + pd.DatetimeIndex(["2000", "2001"]), + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"), + ), + ( + pd.DatetimeIndex(["2000", "2001"]), + None, + DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"), + ), + ( + ["2000", "2001"], + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"), + ), + # Datetime (tz-aware) + ( + ["2000", "2001"], + pd.DatetimeTZDtype(tz="CET"), + DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET") + ), + ), + # Timedelta + ( + ["1h", "2h"], + np.dtype("timedelta64[ns]"), + TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"), + ), + ( + pd.TimedeltaIndex(["1h", "2h"]), + np.dtype("timedelta64[ns]"), + TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"), + ), + ( + np.array([1, 2], dtype="m8[s]"), + np.dtype("timedelta64[s]"), + TimedeltaArray._from_sequence( + np.array([1, 2], dtype="m8[s]"), dtype="m8[s]" + ), + ), + ( + pd.TimedeltaIndex(["1h", "2h"]), + None, + TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"), + ), + ( + # preserve non-nano, i.e. 
don't cast to NumpyExtensionArray + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + None, + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + ), + ( + # preserve non-nano, i.e. don't cast to NumpyExtensionArray + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + np.dtype("m8[s]"), + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + ), + # Category + (["a", "b"], "category", pd.Categorical(["a", "b"])), + ( + ["a", "b"], + pd.CategoricalDtype(None, ordered=True), + pd.Categorical(["a", "b"], ordered=True), + ), + # Interval + ( + [pd.Interval(1, 2), pd.Interval(3, 4)], + "interval", + IntervalArray.from_tuples([(1, 2), (3, 4)]), + ), + # Sparse + ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")), + # IntegerNA + ([1, None], "Int16", pd.array([1, None], dtype="Int16")), + ( + pd.Series([1, 2]), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.int64)), + ), + # String + ( + ["a", None], + "string", + pd.StringDtype() + .construct_array_type() + ._from_sequence(["a", None], dtype=pd.StringDtype()), + ), + ( + ["a", None], + pd.StringDtype(), + pd.StringDtype() + .construct_array_type() + ._from_sequence(["a", None], dtype=pd.StringDtype()), + ), + # Boolean + ( + [True, None], + "boolean", + BooleanArray._from_sequence([True, None], dtype="boolean"), + ), + ( + [True, None], + pd.BooleanDtype(), + BooleanArray._from_sequence([True, None], dtype="boolean"), + ), + # Index + (pd.Index([1, 2]), None, NumpyExtensionArray(np.array([1, 2], dtype=np.int64))), + # Series[EA] returns the EA + ( + pd.Series(pd.Categorical(["a", "b"], categories=["a", "b", "c"])), + None, + pd.Categorical(["a", "b"], categories=["a", "b", "c"]), + ), + # "3rd party" EAs work + ([decimal.Decimal(0), decimal.Decimal(1)], "decimal", to_decimal([0, 1])), + # pass an ExtensionArray, but a different dtype + ( + period_array(["2000", "2001"], freq="D"), + "category", + pd.Categorical([pd.Period("2000", "D"), pd.Period("2001", "D")]), + ), + ], +) +def test_array(data, dtype, expected): + result = pd.array(data, dtype=dtype) + tm.assert_equal(result, expected) + + +def test_array_copy(): + a = np.array([1, 2]) + # default is to copy + b = pd.array(a, dtype=a.dtype) + assert not tm.shares_memory(a, b) + + # copy=True + b = pd.array(a, dtype=a.dtype, copy=True) + assert not tm.shares_memory(a, b) + + # copy=False + b = pd.array(a, dtype=a.dtype, copy=False) + assert tm.shares_memory(a, b) + + +cet = pytz.timezone("CET") + + +@pytest.mark.parametrize( + "data, expected", + [ + # period + ( + [pd.Period("2000", "D"), pd.Period("2001", "D")], + period_array(["2000", "2001"], freq="D"), + ), + # interval + ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])), + # datetime + ( + [pd.Timestamp("2000"), pd.Timestamp("2001")], + DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"), + ), + ( + [datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)], + DatetimeArray._from_sequence(["2000", "2001"], dtype="M8[ns]"), + ), + ( + np.array([1, 2], dtype="M8[ns]"), + DatetimeArray._from_sequence(np.array([1, 2], dtype="M8[ns]")), + ), + ( + np.array([1, 2], dtype="M8[us]"), + DatetimeArray._simple_new( + np.array([1, 2], dtype="M8[us]"), dtype=np.dtype("M8[us]") + ), + ), + # datetimetz + ( + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2001", tz="CET")], + 
DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET", unit="ns") + ), + ), + ( + [ + datetime.datetime(2000, 1, 1, tzinfo=cet), + datetime.datetime(2001, 1, 1, tzinfo=cet), + ], + DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet, unit="ns") + ), + ), + # timedelta + ( + [pd.Timedelta("1h"), pd.Timedelta("2h")], + TimedeltaArray._from_sequence(["1h", "2h"], dtype="m8[ns]"), + ), + ( + np.array([1, 2], dtype="m8[ns]"), + TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[ns]")), + ), + ( + np.array([1, 2], dtype="m8[us]"), + TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[us]")), + ), + # integer + ([1, 2], IntegerArray._from_sequence([1, 2], dtype="Int64")), + ([1, None], IntegerArray._from_sequence([1, None], dtype="Int64")), + ([1, pd.NA], IntegerArray._from_sequence([1, pd.NA], dtype="Int64")), + ([1, np.nan], IntegerArray._from_sequence([1, np.nan], dtype="Int64")), + # float + ([0.1, 0.2], FloatingArray._from_sequence([0.1, 0.2], dtype="Float64")), + ([0.1, None], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")), + ([0.1, np.nan], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")), + ([0.1, pd.NA], FloatingArray._from_sequence([0.1, pd.NA], dtype="Float64")), + # integer-like float + ([1.0, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")), + ([1.0, None], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")), + ([1.0, np.nan], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")), + ([1.0, pd.NA], FloatingArray._from_sequence([1.0, pd.NA], dtype="Float64")), + # mixed-integer-float + ([1, 2.0], FloatingArray._from_sequence([1.0, 2.0], dtype="Float64")), + ( + [1, np.nan, 2.0], + FloatingArray._from_sequence([1.0, None, 2.0], dtype="Float64"), + ), + # string + ( + ["a", "b"], + pd.StringDtype() + .construct_array_type() + ._from_sequence(["a", "b"], dtype=pd.StringDtype()), + ), + ( + ["a", None], + pd.StringDtype() + .construct_array_type() + ._from_sequence(["a", None], dtype=pd.StringDtype()), + ), + # Boolean + ([True, False], BooleanArray._from_sequence([True, False], dtype="boolean")), + ([True, None], BooleanArray._from_sequence([True, None], dtype="boolean")), + ], +) +def test_array_inference(data, expected): + result = pd.array(data) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + # mix of frequencies + [pd.Period("2000", "D"), pd.Period("2001", "Y")], + # mix of closed + [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")], + # Mix of timezones + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], + # Mix of tz-aware and tz-naive + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")], + np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]), + ], +) +def test_array_inference_fails(data): + result = pd.array(data) + expected = NumpyExtensionArray(np.array(data, dtype=object)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("data", [np.array(0)]) +def test_nd_raises(data): + with pytest.raises(ValueError, match="NumpyExtensionArray must be 1-dimensional"): + pd.array(data, dtype="int64") + + +def test_scalar_raises(): + with pytest.raises(ValueError, match="Cannot pass scalar '1'"): + pd.array(1) + + +def test_dataframe_raises(): + # GH#51167 don't accidentally cast to StringArray by doing inference on columns + df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + msg = "Cannot pass DataFrame to 'pandas.array'" + with 
pytest.raises(TypeError, match=msg): + pd.array(df) + + +def test_bounds_check(): + # GH21796 + with pytest.raises( + TypeError, match=r"cannot safely cast non-equivalent int(32|64) to uint16" + ): + pd.array([-1, 2, 3], dtype="UInt16") + + +# --------------------------------------------------------------------------- +# A couple dummy classes to ensure that Series and Indexes are unboxed before +# getting to the EA classes. + + +@register_extension_dtype +class DecimalDtype2(DecimalDtype): + name = "decimal2" + + @classmethod + def construct_array_type(cls): + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return DecimalArray2 + + +class DecimalArray2(DecimalArray): + @classmethod + def _from_sequence(cls, scalars, *, dtype=None, copy=False): + if isinstance(scalars, (pd.Series, pd.Index)): + raise TypeError("scalars should not be of type pd.Series or pd.Index") + + return super()._from_sequence(scalars, dtype=dtype, copy=copy) + + +def test_array_unboxes(index_or_series): + box = index_or_series + + data = box([decimal.Decimal("1"), decimal.Decimal("2")]) + dtype = DecimalDtype2() + # make sure it works + with pytest.raises( + TypeError, match="scalars should not be of type pd.Series or pd.Index" + ): + DecimalArray2._from_sequence(data, dtype=dtype) + + result = pd.array(data, dtype="decimal2") + expected = DecimalArray2._from_sequence(data.values, dtype=dtype) + tm.assert_equal(result, expected) + + +def test_array_to_numpy_na(): + # GH#40638 + arr = pd.array([pd.NA, 1], dtype="string[python]") + result = arr.to_numpy(na_value=True, dtype=bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..7f85c891afeedac6c0122d884f706fac6fdac42c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py @@ -0,0 +1,1340 @@ +from __future__ import annotations + +import re +import warnings + +import numpy as np +import pytest + +from pandas._libs import ( + NaT, + OutOfBoundsDatetime, + Timestamp, +) +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr +from pandas.compat.numpy import np_version_gt2 + +import pandas as pd +from pandas import ( + DatetimeIndex, + Period, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) + + +# TODO: more freq variants +@pytest.fixture(params=["D", "B", "W", "ME", "QE", "YE"]) +def freqstr(request): + """Fixture returning parametrized frequency in string format.""" + return request.param + + +@pytest.fixture +def period_index(freqstr): + """ + A fixture to provide PeriodIndex objects with different frequencies. + + Most PeriodArray behavior is already tested in PeriodIndex tests, + so here we just test that the PeriodArray behavior matches + the PeriodIndex behavior. 
+ """ + # TODO: non-monotone indexes; NaTs, different start dates + with warnings.catch_warnings(): + # suppress deprecation of Period[B] + warnings.filterwarnings( + "ignore", message="Period with BDay freq", category=FutureWarning + ) + freqstr = freq_to_period_freqstr(1, freqstr) + pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return pi + + +@pytest.fixture +def datetime_index(freqstr): + """ + A fixture to provide DatetimeIndex objects with different frequencies. + + Most DatetimeArray behavior is already tested in DatetimeIndex tests, + so here we just test that the DatetimeArray behavior matches + the DatetimeIndex behavior. + """ + # TODO: non-monotone indexes; NaTs, different start dates, timezones + dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return dti + + +@pytest.fixture +def timedelta_index(): + """ + A fixture to provide TimedeltaIndex objects with different frequencies. + Most TimedeltaArray behavior is already tested in TimedeltaIndex tests, + so here we just test that the TimedeltaArray behavior matches + the TimedeltaIndex behavior. + """ + # TODO: flesh this out + return TimedeltaIndex(["1 Day", "3 Hours", "NaT"]) + + +class SharedTests: + index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex] + + @pytest.fixture + def arr1d(self): + """Fixture returning DatetimeArray with daily frequency.""" + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, freq="D") + else: + arr = self.index_cls(data, freq="D")._data + return arr + + def test_compare_len1_raises(self, arr1d): + # make sure we raise when comparing with different lengths, specific + # to the case where one has length-1, which numpy would broadcast + arr = arr1d + idx = self.index_cls(arr) + + with pytest.raises(ValueError, match="Lengths must match"): + arr == arr[:1] + + # test the index classes while we're at it, GH#23078 + with pytest.raises(ValueError, match="Lengths must match"): + idx <= idx[[0]] + + @pytest.mark.parametrize( + "result", + [ + pd.date_range("2020", periods=3), + pd.date_range("2020", periods=3, tz="UTC"), + pd.timedelta_range("0 days", periods=3), + pd.period_range("2020Q1", periods=3, freq="Q"), + ], + ) + def test_compare_with_Categorical(self, result): + expected = pd.Categorical(result) + assert all(result == expected) + assert not any(result != expected) + + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize("as_index", [True, False]) + def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered): + other = pd.Categorical(arr1d, ordered=ordered) + if as_index: + other = pd.CategoricalIndex(other) + + left, right = arr1d, other + if reverse: + left, right = right, left + + ones = np.ones(arr1d.shape, dtype=bool) + zeros = ~ones + + result = left == right + tm.assert_numpy_array_equal(result, ones) + + result = left != right + tm.assert_numpy_array_equal(result, zeros) + + if not reverse and not as_index: + # Otherwise Categorical raises TypeError bc it is not ordered + # TODO: we should probably get the same behavior regardless? 
+ result = left < right + tm.assert_numpy_array_equal(result, zeros) + + result = left <= right + tm.assert_numpy_array_equal(result, ones) + + result = left > right + tm.assert_numpy_array_equal(result, zeros) + + result = left >= right + tm.assert_numpy_array_equal(result, ones) + + def test_take(self): + data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9 + np.random.default_rng(2).shuffle(data) + + if self.array_cls is PeriodArray: + arr = PeriodArray(data, dtype="period[D]") + else: + arr = self.index_cls(data)._data + idx = self.index_cls._simple_new(arr) + + takers = [1, 4, 94] + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + takers = np.array([1, 4, 94]) + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time]) + def test_take_fill_raises(self, fill_value, arr1d): + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([0, 1], allow_fill=True, fill_value=fill_value) + + def test_take_fill(self, arr1d): + arr = arr1d + + result = arr.take([-1, 1], allow_fill=True, fill_value=None) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=NaT) + assert result[0] is NaT + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_take_fill_str(self, arr1d): + # Cast str fill_value matching other fill_value-taking methods + result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1])) + expected = arr1d[[-1, 1]] + tm.assert_equal(result, expected) + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([-1, 1], allow_fill=True, fill_value="foo") + + def test_concat_same_type(self, arr1d): + arr = arr1d + idx = self.index_cls(arr) + idx = idx.insert(0, NaT) + arr = arr1d + + result = arr._concat_same_type([arr[:-1], arr[1:], arr]) + arr2 = arr.astype(object) + expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2])) + + tm.assert_index_equal(self.index_cls(result), expected) + + def test_unbox_scalar(self, arr1d): + result = arr1d._unbox_scalar(arr1d[0]) + expected = arr1d._ndarray.dtype.type + assert isinstance(result, expected) + + result = arr1d._unbox_scalar(NaT) + assert isinstance(result, expected) + + msg = f"'value' should be a {self.scalar_type.__name__}." 
+ with pytest.raises(ValueError, match=msg): + arr1d._unbox_scalar("foo") + + def test_check_compatible_with(self, arr1d): + arr1d._check_compatible_with(arr1d[0]) + arr1d._check_compatible_with(arr1d[:1]) + arr1d._check_compatible_with(NaT) + + def test_scalar_from_string(self, arr1d): + result = arr1d._scalar_from_string(str(arr1d[0])) + assert result == arr1d[0] + + def test_reduce_invalid(self, arr1d): + msg = "does not support reduction 'not a method'" + with pytest.raises(TypeError, match=msg): + arr1d._reduce("not a method") + + @pytest.mark.parametrize("method", ["pad", "backfill"]) + def test_fillna_method_doesnt_change_orig(self, method): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls._from_sequence(data) + arr[4] = NaT + + fill_value = arr[3] if method == "pad" else arr[5] + + result = arr._pad_or_backfill(method=method) + assert result[4] == fill_value + + # check that the original was not changed + assert arr[4] is NaT + + def test_searchsorted(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls._from_sequence(data) + + # scalar + result = arr.searchsorted(arr[1]) + assert result == 1 + + result = arr.searchsorted(arr[2], side="right") + assert result == 3 + + # own-type + result = arr.searchsorted(arr[1:3]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + result = arr.searchsorted(arr[1:3], side="right") + expected = np.array([2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + # GH#29884 match numpy convention on whether NaT goes + # at the end or the beginning + result = arr.searchsorted(NaT) + assert result == 10 + + @pytest.mark.parametrize("box", [None, "index", "series"]) + def test_searchsorted_castable_strings(self, arr1d, box, string_storage): + arr = arr1d + if box is None: + pass + elif box == "index": + # Test the equivalent Index.searchsorted method while we're here + arr = self.index_cls(arr) + else: + # Test the equivalent Series.searchsorted method while we're here + arr = pd.Series(arr) + + # scalar + result = arr.searchsorted(str(arr[1])) + assert result == 1 + + result = arr.searchsorted(str(arr[2]), side="right") + assert result == 3 + + result = arr.searchsorted([str(x) for x in arr[1:3]]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got 'str' instead." + ), + ): + arr.searchsorted("foo") + + with pd.option_context("string_storage", string_storage): + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got string array instead." 
+ ), + ): + arr.searchsorted([str(arr[1]), "baz"]) + + def test_getitem_near_implementation_bounds(self): + # We only check tz-naive for DTA bc the bounds are slightly different + # for other tzs + i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8") + if self.array_cls is PeriodArray: + arr = self.array_cls(i8vals, dtype="period[ns]") + else: + arr = self.index_cls(i8vals, freq="ns")._data + arr[0] # should not raise OutOfBoundsDatetime + + index = pd.Index(arr) + index[0] # should not raise OutOfBoundsDatetime + + ser = pd.Series(arr) + ser[0] # should not raise OutOfBoundsDatetime + + def test_getitem_2d(self, arr1d): + # 2d slicing on a 1D array + expected = type(arr1d)._simple_new( + arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype + ) + result = arr1d[:, np.newaxis] + tm.assert_equal(result, expected) + + # Lookup on a 2D array + arr2d = expected + expected = type(arr2d)._simple_new(arr2d._ndarray[:3, 0], dtype=arr2d.dtype) + result = arr2d[:3, 0] + tm.assert_equal(result, expected) + + # Scalar lookup + result = arr2d[-1, 0] + expected = arr1d[-1] + assert result == expected + + def test_iter_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + result = list(arr2d) + assert len(result) == 3 + for x in result: + assert isinstance(x, type(arr1d)) + assert x.ndim == 1 + assert x.dtype == arr1d.dtype + + def test_repr_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + + result = repr(arr2d) + + if isinstance(arr2d, TimedeltaArray): + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]._repr_base()}'],\n" + f"['{arr1d[1]._repr_base()}'],\n" + f"['{arr1d[2]._repr_base()}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + else: + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]}'],\n" + f"['{arr1d[1]}'],\n" + f"['{arr1d[2]}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + + assert result == expected + + def test_setitem(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.index_cls(data, freq="D")._data + + arr[0] = arr[1] + expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + expected[0] = expected[1] + + tm.assert_numpy_array_equal(arr.asi8, expected) + + arr[:2] = arr[-2:] + expected[:2] = expected[-2:] + tm.assert_numpy_array_equal(arr.asi8, expected) + + @pytest.mark.parametrize( + "box", + [ + pd.Index, + pd.Series, + np.array, + list, + NumpyExtensionArray, + ], + ) + def test_setitem_object_dtype(self, box, arr1d): + expected = arr1d.copy()[::-1] + if expected.dtype.kind in ["m", "M"]: + expected = expected._with_freq(None) + + vals = expected + if box is list: + vals = list(vals) + elif box is np.array: + # if we do np.array(x).astype(object) then dt64 and td64 cast to ints + vals = np.array(vals.astype(object)) + elif box is NumpyExtensionArray: + vals = box(np.asarray(vals, dtype=object)) + else: + vals = box(vals).astype(object) + + arr1d[:] = vals + + tm.assert_equal(arr1d, expected) + + def test_setitem_strs(self, arr1d): + # Check that we parse strs in both scalar and listlike + + # Setting list-like of strs + expected = arr1d.copy() + expected[[0, 1]] = arr1d[-2:] + + result = arr1d.copy() + result[:2] = [str(x) for x in arr1d[-2:]] + tm.assert_equal(result, expected) + + # Same thing but now for just a scalar str + expected = arr1d.copy() + 
expected[0] = arr1d[-1] + + result = arr1d.copy() + result[0] = str(arr1d[-1]) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("as_index", [True, False]) + def test_setitem_categorical(self, arr1d, as_index): + expected = arr1d.copy()[::-1] + if not isinstance(expected, PeriodArray): + expected = expected._with_freq(None) + + cat = pd.Categorical(arr1d) + if as_index: + cat = pd.CategoricalIndex(cat) + + arr1d[:] = cat[::-1] + + tm.assert_equal(arr1d, expected) + + def test_setitem_raises(self, arr1d): + arr = arr1d[:10] + val = arr[0] + + with pytest.raises(IndexError, match="index 12 is out of bounds"): + arr[12] = val + + with pytest.raises(TypeError, match="value should be a.* 'object'"): + arr[0] = object() + + msg = "cannot set using a list-like indexer with a different length" + with pytest.raises(ValueError, match=msg): + # GH#36339 + arr[[]] = [arr[1]] + + msg = "cannot set using a slice indexer with a different length than" + with pytest.raises(ValueError, match=msg): + # GH#36339 + arr[1:1] = arr[:3] + + @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series]) + def test_setitem_numeric_raises(self, arr1d, box): + # We dont case e.g. int64 to our own dtype for setitem + + msg = ( + f"value should be a '{arr1d._scalar_type.__name__}', " + "'NaT', or array of those. Got" + ) + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0, 1]) + + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0.0, 1.0]) + + def test_inplace_arithmetic(self): + # GH#24115 check that iadd and isub are actually in-place + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.index_cls(data, freq="D")._data + + expected = arr + pd.Timedelta(days=1) + arr += pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + expected = arr - pd.Timedelta(days=1) + arr -= pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + def test_shift_fill_int_deprecated(self, arr1d): + # GH#31971, enforced in 2.0 + with pytest.raises(TypeError, match="value should be a"): + arr1d.shift(1, fill_value=1) + + def test_median(self, arr1d): + arr = arr1d + if len(arr) % 2 == 0: + # make it easier to define `expected` + arr = arr[:-1] + + expected = arr[len(arr) // 2] + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + arr[len(arr) // 2] = NaT + if not isinstance(expected, Period): + expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean() + + assert arr.median(skipna=False) is NaT + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + assert arr[:0].median() is NaT + assert arr[:0].median(skipna=False) is NaT + + # 2d Case + arr2 = arr.reshape(-1, 1) + + result = arr2.median(axis=None) + assert type(result) is type(expected) + assert result == expected + + assert arr2.median(axis=None, skipna=False) is NaT + + result = arr2.median(axis=0) + expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=0, skipna=False) + expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=1) + tm.assert_equal(result, arr) + + result = arr2.median(axis=1, skipna=False) + tm.assert_equal(result, arr) + + def test_from_integer_array(self): + arr = np.array([1, 2, 3], dtype=np.int64) + data = pd.array(arr, dtype="Int64") + if self.array_cls is PeriodArray: + 
expected = self.array_cls(arr, dtype=self.example_dtype) + result = self.array_cls(data, dtype=self.example_dtype) + else: + expected = self.array_cls._from_sequence(arr, dtype=self.example_dtype) + result = self.array_cls._from_sequence(data, dtype=self.example_dtype) + + tm.assert_extension_array_equal(result, expected) + + +class TestDatetimeArray(SharedTests): + index_cls = DatetimeIndex + array_cls = DatetimeArray + scalar_type = Timestamp + example_dtype = "M8[ns]" + + @pytest.fixture + def arr1d(self, tz_naive_fixture, freqstr): + """ + Fixture returning DatetimeArray with parametrized frequency and + timezones + """ + tz = tz_naive_fixture + dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz) + dta = dti._data + return dta + + def test_round(self, arr1d): + # GH#24064 + dti = self.index_cls(arr1d) + + result = dti.round(freq="2min") + expected = dti - pd.Timedelta(minutes=1) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + dta = dti._data + result = dta.round(freq="2min") + expected = expected._data._with_freq(None) + tm.assert_datetime_array_equal(result, expected) + + def test_array_interface(self, datetime_index): + arr = datetime_index._data + copy_false = None if np_version_gt2 else False + + # default asarray gives the same underlying data (for tz naive) + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + + # specifying M8[ns] gives the same result as default + result = np.asarray(arr, dtype="datetime64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]", copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_array_object_dtype(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + + expected = np.array(list(dti)) + + result = np.array(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # also test the DatetimeIndex method while we're at it + result = np.array(dti, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_array_tz(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + copy_false = None if np_version_gt2 else False + + expected = dti.asi8.view("M8[ns]") + result = np.array(arr, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype="datetime64[ns]") + tm.assert_numpy_array_equal(result, expected) + + # check that we are not making copies when setting copy=copy_false + result = np.array(arr, dtype="M8[ns]", copy=copy_false) + assert 
result.base is expected.base + assert result.base is not None + result = np.array(arr, dtype="datetime64[ns]", copy=copy_false) + assert result.base is expected.base + assert result.base is not None + + def test_array_i8_dtype(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + copy_false = None if np_version_gt2 else False + + expected = dti.asi8 + result = np.array(arr, dtype="i8") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + # check that we are still making copies when setting copy=copy_false + result = np.array(arr, dtype="i8", copy=copy_false) + assert result.base is not expected.base + assert result.base is None + + def test_from_array_keeps_base(self): + # Ensure that DatetimeArray._ndarray.base isn't lost. + arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + dta = DatetimeArray._from_sequence(arr) + + assert dta._ndarray is arr + dta = DatetimeArray._from_sequence(arr[:0]) + assert dta._ndarray.base is arr + + def test_from_dti(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + assert list(dti) == list(arr) + + # Check that Index.__new__ knows what to do with DatetimeArray + dti2 = pd.Index(arr) + assert isinstance(dti2, DatetimeIndex) + assert list(dti2) == list(arr) + + def test_astype_object(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(dti) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_to_period(self, datetime_index, freqstr): + dti = datetime_index + arr = dti._data + + freqstr = freq_to_period_freqstr(1, freqstr) + expected = dti.to_period(freq=freqstr) + result = arr.to_period(freq=freqstr) + assert isinstance(result, PeriodArray) + + tm.assert_equal(result, expected._data) + + def test_to_period_2d(self, arr1d): + arr2d = arr1d.reshape(1, -1) + + warn = None if arr1d.tz is None else UserWarning + with tm.assert_produces_warning(warn): + result = arr2d.to_period("D") + expected = arr1d.to_period("D").reshape(1, -1) + tm.assert_period_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + dti = self.index_cls(arr1d) + arr = arr1d + assert dti.freq == arr.freq + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._field_ops) + def test_int_properties(self, arr1d, propname): + dti = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, arr1d, fixed_now_ts): + arr = arr1d + dti = self.index_cls(arr1d) + + now = fixed_now_ts.tz_localize(dti.tz) + result = arr.take([-1, 1], allow_fill=True, fill_value=now) + assert result[0] == now + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timedelta invalid + arr.take([-1, 1], allow_fill=True, fill_value=now - now) + + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1")) + + tz = None if dti.tz is not None else "US/Eastern" + now = fixed_now_ts.tz_localize(tz) + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + # Timestamp with mismatched tz-awareness + arr.take([-1, 1], allow_fill=True, fill_value=now) + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + if arr.tz is not None: + # GH#37356 + # Assuming here that arr1d fixture does not include Australia/Melbourne + value = fixed_now_ts.tz_localize("Australia/Melbourne") + result = arr.take([-1, 1], allow_fill=True, fill_value=value) + + expected = arr.take( + [-1, 1], + allow_fill=True, + fill_value=value.tz_convert(arr.dtype.tz), + ) + tm.assert_equal(result, expected) + + def test_concat_same_type_invalid(self, arr1d): + # different timezones + arr = arr1d + + if arr.tz is None: + other = arr.tz_localize("UTC") + else: + other = arr.tz_localize(None) + + with pytest.raises(ValueError, match="to_concat must have the same"): + arr._concat_same_type([arr, other]) + + def test_concat_same_type_different_freq(self, unit): + # we *can* concatenate DTI with different freqs. 
+ a = pd.date_range("2000", periods=2, freq="D", tz="US/Central", unit=unit)._data + b = pd.date_range("2000", periods=2, freq="h", tz="US/Central", unit=unit)._data + result = DatetimeArray._concat_same_type([a, b]) + expected = ( + pd.to_datetime( + [ + "2000-01-01 00:00:00", + "2000-01-02 00:00:00", + "2000-01-01 00:00:00", + "2000-01-01 01:00:00", + ] + ) + .tz_localize("US/Central") + .as_unit(unit) + ._data + ) + + tm.assert_datetime_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y %b") + expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = DatetimeIndex(["2019-01-01", NaT])._data + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestTimedeltaArray(SharedTests): + index_cls = TimedeltaIndex + array_cls = TimedeltaArray + scalar_type = pd.Timedelta + example_dtype = "m8[ns]" + + def test_from_tdi(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = tdi._data + assert list(arr) == list(tdi) + + # Check that Index.__new__ knows what to do with TimedeltaArray + tdi2 = pd.Index(arr) + assert isinstance(tdi2, TimedeltaIndex) + assert list(tdi2) == list(arr) + + def test_astype_object(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = tdi._data + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(tdi) + + def test_to_pytimedelta(self, timedelta_index): + tdi = timedelta_index + arr = tdi._data + + expected = tdi.to_pytimedelta() + result = arr.to_pytimedelta() + + tm.assert_numpy_array_equal(result, expected) + + def test_total_seconds(self, timedelta_index): + tdi = timedelta_index + arr = tdi._data + + expected = tdi.total_seconds() + result = arr.total_seconds() + + tm.assert_numpy_array_equal(result, expected.values) + + @pytest.mark.parametrize("propname", TimedeltaArray._field_ops) + def test_int_properties(self, timedelta_index, propname): + tdi = timedelta_index + arr = tdi._data + + result = getattr(arr, propname) + expected = np.array(getattr(tdi, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self, timedelta_index): + arr = timedelta_index._data + copy_false = None if np_version_gt2 else False + + # default asarray gives the same underlying data + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + + # specifying m8[ns] gives the same result as default + result = np.asarray(arr, dtype="timedelta64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not 
np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, timedelta_index, fixed_now_ts): + tdi = timedelta_index + arr = tdi._data + + td1 = pd.Timedelta(days=1) + result = arr.take([-1, 1], allow_fill=True, fill_value=td1) + assert result[0] == td1 + + value = fixed_now_ts + msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timestamp invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = fixed_now_ts.to_period("D") + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = np.datetime64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + +@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +class TestPeriodArray(SharedTests): + index_cls = PeriodIndex + array_cls = PeriodArray + scalar_type = Period + example_dtype = PeriodIndex([], freq="W").dtype + + @pytest.fixture + def arr1d(self, period_index): + """ + Fixture returning DatetimeArray from parametrized PeriodIndex objects + """ + return period_index._data + + def test_from_pi(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + assert list(arr) == list(pi) + + # Check that Index.__new__ knows what to do with PeriodArray + pi2 = pd.Index(arr) + assert isinstance(pi2, PeriodIndex) + assert list(pi2) == list(arr) + + def test_astype_object(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(pi) + + def test_take_fill_valid(self, arr1d): + arr = arr1d + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + @pytest.mark.parametrize("how", ["S", "E"]) + def test_to_timestamp(self, how, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + + expected = DatetimeIndex(pi.to_timestamp(how=how))._data + result = arr.to_timestamp(how=how) + assert isinstance(result, DatetimeArray) + + tm.assert_equal(result, expected) + + def test_to_timestamp_roundtrip_bday(self): + # Case where infer_freq inside would choose "D" instead of "B" + dta = pd.date_range("2021-10-18", periods=3, freq="B")._data + parr = dta.to_period() + result = parr.to_timestamp() + assert result.freq == "B" + tm.assert_extension_array_equal(result, dta) + + dta2 = dta[::2] + parr2 = dta2.to_period() + result2 = parr2.to_timestamp() + assert result2.freq == "2B" + tm.assert_extension_array_equal(result2, dta2) + + parr3 = dta.to_period("2B") + result3 = parr3.to_timestamp() + assert result3.freq == "B" + tm.assert_extension_array_equal(result3, dta) + + def test_to_timestamp_out_of_bounds(self): + # GH#19643 previously overflowed silently + pi = pd.period_range("1500", freq="Y", periods=3) + msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00" + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi.to_timestamp() + + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi._data.to_timestamp() + + @pytest.mark.parametrize("propname", PeriodArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", PeriodArray._field_ops) + def test_int_properties(self, arr1d, propname): + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self, arr1d): + arr = arr1d + + # default asarray gives objects + result = np.asarray(arr) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to object dtype (same as default) + result = np.asarray(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(arr, dtype="int64") + tm.assert_numpy_array_equal(result, arr.asi8) + + # to other dtypes + msg = r"float\(\) argument must be a string or a( real)? 
number, not 'Period'" + with pytest.raises(TypeError, match=msg): + np.asarray(arr, dtype="float64") + + result = np.asarray(arr, dtype="S20") + expected = np.asarray(arr).astype("S20") + tm.assert_numpy_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y") + expected = np.array([per.strftime("%Y") for per in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]")) + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "arr,casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (NaT, np.timedelta64("NaT", "ns")), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (NaT, np.datetime64("NaT", "ns")), + ), + (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)), + ], + ids=lambda x: type(x).__name__, +) +def test_casting_nat_setitem_array(arr, casting_nats): + expected = type(arr)._from_sequence([NaT, arr[1], arr[2]], dtype=arr.dtype) + + for nat in casting_nats: + arr = arr.copy() + arr[0] = nat + tm.assert_equal(arr, expected) + + +@pytest.mark.parametrize( + "arr,non_casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (np.datetime64("NaT", "ns"), NaT._value), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (np.timedelta64("NaT", "ns"), NaT._value), + ), + ( + pd.period_range("2000-01-01", periods=3, freq="D")._data, + (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value), + ), + ], + ids=lambda x: type(x).__name__, +) +def test_invalid_nat_setitem_array(arr, non_casting_nats): + msg = ( + "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " + "Got '(timedelta64|datetime64|int)' instead." 
+ ) + + for nat in non_casting_nats: + with pytest.raises(TypeError, match=msg): + arr[0] = nat + + +@pytest.mark.parametrize( + "arr", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("2000", periods=4).array, + ], +) +def test_to_numpy_extra(arr): + arr[0] = NaT + original = arr.copy() + + result = arr.to_numpy() + assert np.isnan(result[0]) + + result = arr.to_numpy(dtype="int64") + assert result[0] == -9223372036854775808 + + result = arr.to_numpy(dtype="int64", na_value=0) + assert result[0] == 0 + + result = arr.to_numpy(na_value=arr[1].to_numpy()) + assert result[0] == result[1] + + result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False)) + assert result[0] == result[1] + + tm.assert_equal(arr, original) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + pd.to_timedelta([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "klass", + [ + list, + np.array, + pd.array, + pd.Series, + pd.Index, + pd.Categorical, + pd.CategoricalIndex, + ], +) +def test_searchsorted_datetimelike_with_listlike(values, klass, as_index): + # https://github.com/pandas-dev/pandas/issues/32762 + if not as_index: + values = values._data + + result = values.searchsorted(klass(values)) + expected = np.array([0, 1], dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + pd.to_timedelta([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] +) +def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg): + # https://github.com/pandas-dev/pandas/issues/32762 + msg = "[Unexpected type|Cannot compare]" + with pytest.raises(TypeError, match=msg): + values.searchsorted(arg) + + +@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) +def test_period_index_construction_from_strings(klass): + # https://github.com/pandas-dev/pandas/issues/26109 + strings = ["2020Q1", "2020Q2"] * 2 + data = klass(strings) + result = PeriodIndex(data, freq="Q") + expected = PeriodIndex([Period(s) for s in strings]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_pandas_array(dtype): + # GH#24615 + data = np.array([1, 2, 3], dtype=dtype) + arr = NumpyExtensionArray(data) + + cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] + + depr_msg = f"{cls.__name__}.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = cls(arr) + expected = cls(data) + tm.assert_extension_array_equal(result, expected) + + result = cls._from_sequence(arr, dtype=dtype) + expected = cls._from_sequence(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) + + # Let's check the Indexes while we're here + idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] + result = idx_cls(arr) + expected = idx_cls(data) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimes.py 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimes.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0576cc65a2787edacdb1e377a02287d1caaff1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimes.py @@ -0,0 +1,840 @@ +""" +Tests for DatetimeArray +""" +from __future__ import annotations + +from datetime import timedelta +import operator + +try: + from zoneinfo import ZoneInfo +except ImportError: + # Cannot assign to a type + ZoneInfo = None # type: ignore[misc, assignment] + +import numpy as np +import pytest + +from pandas._libs.tslibs import tz_compare + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit(self, request): + """Fixture returning parametrized time units""" + return request.param + + @pytest.fixture + def dtype(self, unit, tz_naive_fixture): + tz = tz_naive_fixture + if tz is None: + return np.dtype(f"datetime64[{unit}]") + else: + return DatetimeTZDtype(unit=unit, tz=tz) + + @pytest.fixture + def dta_dti(self, unit, dtype): + tz = getattr(dtype, "tz", None) + + dti = pd.date_range("2016-01-01", periods=55, freq="D", tz=tz) + if tz is None: + arr = np.asarray(dti).astype(f"M8[{unit}]") + else: + arr = np.asarray(dti.tz_convert("UTC").tz_localize(None)).astype( + f"M8[{unit}]" + ) + + dta = DatetimeArray._simple_new(arr, dtype=dtype) + return dta, dti + + @pytest.fixture + def dta(self, dta_dti): + dta, dti = dta_dti + return dta + + def test_non_nano(self, unit, dtype): + arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]") + dta = DatetimeArray._simple_new(arr, dtype=dtype) + + assert dta.dtype == dtype + assert dta[0].unit == unit + assert tz_compare(dta.tz, dta[0].tz) + assert (dta[0] == dta[:1]).all() + + @pytest.mark.parametrize( + "field", DatetimeArray._field_ops + DatetimeArray._bool_ops + ) + def test_fields(self, unit, field, dtype, dta_dti): + dta, dti = dta_dti + + assert (dti == dta).all() + + res = getattr(dta, field) + expected = getattr(dti._data, field) + tm.assert_numpy_array_equal(res, expected) + + def test_normalize(self, unit): + dti = pd.date_range("2016-01-01 06:00:00", periods=55, freq="D") + arr = np.asarray(dti).astype(f"M8[{unit}]") + + dta = DatetimeArray._simple_new(arr, dtype=arr.dtype) + + assert not dta.is_normalized + + # TODO: simplify once we can just .astype to other unit + exp = np.asarray(dti.normalize()).astype(f"M8[{unit}]") + expected = DatetimeArray._simple_new(exp, dtype=exp.dtype) + + res = dta.normalize() + tm.assert_extension_array_equal(res, expected) + + def test_simple_new_requires_match(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]") + dtype = DatetimeTZDtype(unit, "UTC") + + dta = DatetimeArray._simple_new(arr, dtype=dtype) + assert dta.dtype == dtype + + wrong = DatetimeTZDtype("ns", "UTC") + with pytest.raises(AssertionError, match=""): + DatetimeArray._simple_new(arr, dtype=wrong) + + def test_std_non_nano(self, unit): + dti = pd.date_range("2016-01-01", periods=55, freq="D") + arr = np.asarray(dti).astype(f"M8[{unit}]") + + dta = DatetimeArray._simple_new(arr, dtype=arr.dtype) + + # we should match the nano-reso std, but floored to our reso. 
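+ # e.g. for unit="s", dta.std() should equal dti.std().floor("s"), which is what the assertion below checks.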
+ res = dta.std() + assert res._creso == dta._creso + assert res == dti.std().floor(unit) + + @pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning") + def test_to_period(self, dta_dti): + dta, dti = dta_dti + result = dta.to_period("D") + expected = dti._data.to_period("D") + + tm.assert_extension_array_equal(result, expected) + + def test_iter(self, dta): + res = next(iter(dta)) + expected = dta[0] + + assert type(res) is pd.Timestamp + assert res._value == expected._value + assert res._creso == expected._creso + assert res == expected + + def test_astype_object(self, dta): + result = dta.astype(object) + assert all(x._creso == dta._creso for x in result) + assert all(x == y for x, y in zip(result, dta)) + + def test_to_pydatetime(self, dta_dti): + dta, dti = dta_dti + + result = dta.to_pydatetime() + expected = dti.to_pydatetime() + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("meth", ["time", "timetz", "date"]) + def test_time_date(self, dta_dti, meth): + dta, dti = dta_dti + + result = getattr(dta, meth) + expected = getattr(dti, meth) + tm.assert_numpy_array_equal(result, expected) + + def test_format_native_types(self, unit, dtype, dta_dti): + # In this case we should get the same formatted values with our nano + # version dti._data as we do with the non-nano dta + dta, dti = dta_dti + + res = dta._format_native_types() + exp = dti._data._format_native_types() + tm.assert_numpy_array_equal(res, exp) + + def test_repr(self, dta_dti, unit): + dta, dti = dta_dti + + assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}") + + # TODO: tests with td64 + def test_compare_mismatched_resolutions(self, comparison_op): + # comparison that numpy gets wrong bc of silent overflows + op = comparison_op + + iinfo = np.iinfo(np.int64) + vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64) + + # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2] + arr = np.array(vals).view("M8[ns]") + arr2 = arr.view("M8[s]") + + left = DatetimeArray._simple_new(arr, dtype=arr.dtype) + right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype) + + if comparison_op is operator.eq: + expected = np.array([False, False, False]) + elif comparison_op is operator.ne: + expected = np.array([True, True, True]) + elif comparison_op in [operator.lt, operator.le]: + expected = np.array([False, False, True]) + else: + expected = np.array([False, True, False]) + + result = op(left, right) + tm.assert_numpy_array_equal(result, expected) + + result = op(left[1], right) + tm.assert_numpy_array_equal(result, expected) + + if op not in [operator.eq, operator.ne]: + # check that numpy still gets this wrong; if it is fixed we may be + # able to remove compare_mismatched_resolutions + np_res = op(left._ndarray, right._ndarray) + tm.assert_numpy_array_equal(np_res[1:], ~expected[1:]) + + def test_add_mismatched_reso_doesnt_downcast(self): + # https://github.com/pandas-dev/pandas/pull/48748#issuecomment-1260181008 + td = pd.Timedelta(microseconds=1) + dti = pd.date_range("2016-01-01", periods=3) - td + dta = dti._data.as_unit("us") + + res = dta + td.as_unit("us") + # even though the result is an even number of days + # (so we _could_ downcast to unit="s"), we do not. 
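+ # i.e. the addition keeps the finer microsecond resolution of the operands instead of downcasting.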
+ assert res.unit == "us" + + @pytest.mark.parametrize( + "scalar", + [ + timedelta(hours=2), + pd.Timedelta(hours=2), + np.timedelta64(2, "h"), + np.timedelta64(2 * 3600 * 1000, "ms"), + pd.offsets.Minute(120), + pd.offsets.Hour(2), + ], + ) + def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar): + dta, dti = dta_dti + + td = pd.Timedelta(scalar) + exp_unit = tm.get_finest_unit(dta.unit, td.unit) + + expected = (dti + td)._data.as_unit(exp_unit) + result = dta + scalar + tm.assert_extension_array_equal(result, expected) + + result = scalar + dta + tm.assert_extension_array_equal(result, expected) + + expected = (dti - td)._data.as_unit(exp_unit) + result = dta - scalar + tm.assert_extension_array_equal(result, expected) + + def test_sub_datetimelike_scalar_mismatch(self): + dti = pd.date_range("2016-01-01", periods=3) + dta = dti._data.as_unit("us") + + ts = dta[0].as_unit("s") + + result = dta - ts + expected = (dti - dti[0])._data.as_unit("us") + assert result.dtype == "m8[us]" + tm.assert_extension_array_equal(result, expected) + + def test_sub_datetime64_reso_mismatch(self): + dti = pd.date_range("2016-01-01", periods=3) + left = dti._data.as_unit("s") + right = left.as_unit("ms") + + result = left - right + exp_values = np.array([0, 0, 0], dtype="m8[ms]") + expected = TimedeltaArray._simple_new( + exp_values, + dtype=exp_values.dtype, + ) + tm.assert_extension_array_equal(result, expected) + result2 = right - left + tm.assert_extension_array_equal(result2, expected) + + +class TestDatetimeArrayComparisons: + # TODO: merge this into tests/arithmetic/test_datetime64 once it is + # sufficiently robust + + def test_cmp_dt64_arraylike_tznaive(self, comparison_op): + # arbitrary tz-naive DatetimeIndex + op = comparison_op + + dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None) + arr = dti._data + assert arr.freq == dti.freq + assert arr.tz == dti.tz + + right = dti + + expected = np.ones(len(arr), dtype=bool) + if comparison_op.__name__ in ["ne", "gt", "lt"]: + # for these the comparisons should be all-False + expected = ~expected + + result = op(arr, arr) + tm.assert_numpy_array_equal(result, expected) + for other in [ + right, + np.array(right), + list(right), + tuple(right), + right.astype(object), + ]: + result = op(arr, other) + tm.assert_numpy_array_equal(result, expected) + + result = op(other, arr) + tm.assert_numpy_array_equal(result, expected) + + +class TestDatetimeArray: + def test_astype_ns_to_ms_near_bounds(self): + # GH#55979 + ts = pd.Timestamp("1677-09-21 00:12:43.145225") + target = ts.as_unit("ms") + + dta = DatetimeArray._from_sequence([ts], dtype="M8[ns]") + assert (dta.view("i8") == ts.as_unit("ns").value).all() + + result = dta.astype("M8[ms]") + assert result[0] == target + + expected = DatetimeArray._from_sequence([ts], dtype="M8[ms]") + assert (expected.view("i8") == target._value).all() + + tm.assert_datetime_array_equal(result, expected) + + def test_astype_non_nano_tznaive(self): + dti = pd.date_range("2016-01-01", periods=3) + + res = dti.astype("M8[s]") + assert res.dtype == "M8[s]" + + dta = dti._data + res = dta.astype("M8[s]") + assert res.dtype == "M8[s]" + assert isinstance(res, pd.core.arrays.DatetimeArray) # used to be ndarray + + def test_astype_non_nano_tzaware(self): + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + + res = dti.astype("M8[s, US/Pacific]") + assert res.dtype == "M8[s, US/Pacific]" + + dta = dti._data + res = dta.astype("M8[s, US/Pacific]") + assert res.dtype == "M8[s, US/Pacific]" + + # from 
non-nano to non-nano, preserving reso + res2 = res.astype("M8[s, UTC]") + assert res2.dtype == "M8[s, UTC]" + assert not tm.shares_memory(res2, res) + + res3 = res.astype("M8[s, UTC]", copy=False) + assert res2.dtype == "M8[s, UTC]" + assert tm.shares_memory(res3, res) + + def test_astype_to_same(self): + arr = DatetimeArray._from_sequence( + ["2000"], dtype=DatetimeTZDtype(tz="US/Central") + ) + result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) + assert result is arr + + @pytest.mark.parametrize("dtype", ["datetime64[ns]", "datetime64[ns, UTC]"]) + @pytest.mark.parametrize( + "other", ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, CET]"] + ) + def test_astype_copies(self, dtype, other): + # https://github.com/pandas-dev/pandas/pull/32490 + ser = pd.Series([1, 2], dtype=dtype) + orig = ser.copy() + + err = False + if (dtype == "datetime64[ns]") ^ (other == "datetime64[ns]"): + # deprecated in favor of tz_localize + err = True + + if err: + if dtype == "datetime64[ns]": + msg = "Use obj.tz_localize instead or series.dt.tz_localize instead" + else: + msg = "from timezone-aware dtype to timezone-naive dtype" + with pytest.raises(TypeError, match=msg): + ser.astype(other) + else: + t = ser.astype(other) + t[:] = pd.NaT + tm.assert_series_equal(ser, orig) + + @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) + def test_astype_int(self, dtype): + arr = DatetimeArray._from_sequence( + [pd.Timestamp("2000"), pd.Timestamp("2001")], dtype="M8[ns]" + ) + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_to_sparse_dt64(self): + # GH#50082 + dti = pd.date_range("2016-01-01", periods=4) + dta = dti._data + result = dta.astype("Sparse[datetime64[ns]]") + + assert result.dtype == "Sparse[datetime64[ns]]" + assert (result == dta).all() + + def test_tz_setter_raises(self): + arr = DatetimeArray._from_sequence( + ["2000"], dtype=DatetimeTZDtype(tz="US/Central") + ) + with pytest.raises(AttributeError, match="tz_localize"): + arr.tz = "UTC" + + def test_setitem_str_impute_tz(self, tz_naive_fixture): + # Like for getitem, if we are passed a naive-like string, we impute + # our own timezone. 
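+ # e.g. assigning the naive string "2020-09-08 16:50" into a tz-aware array is read as that wall time in the array's own timezone.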
+ tz = tz_naive_fixture + + data = np.array([1, 2, 3], dtype="M8[ns]") + dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz) + arr = DatetimeArray._from_sequence(data, dtype=dtype) + expected = arr.copy() + + ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz) + setter = str(ts.tz_localize(None)) + + # Setting a scalar tznaive string + expected[0] = ts + arr[0] = setter + tm.assert_equal(arr, expected) + + # Setting a listlike of tznaive strings + expected[1] = ts + arr[:2] = [setter, setter] + tm.assert_equal(arr, expected) + + def test_setitem_different_tz_raises(self): + # pre-2.0 we required exact tz match, in 2.0 we require only + # tzawareness-match + data = np.array([1, 2, 3], dtype="M8[ns]") + arr = DatetimeArray._from_sequence( + data, copy=False, dtype=DatetimeTZDtype(tz="US/Central") + ) + with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): + arr[0] = pd.Timestamp("2000") + + ts = pd.Timestamp("2000", tz="US/Eastern") + arr[0] = ts + assert arr[0] == ts.tz_convert("US/Central") + + def test_setitem_clears_freq(self): + a = pd.date_range("2000", periods=2, freq="D", tz="US/Central")._data + a[0] = pd.Timestamp("2000", tz="US/Central") + assert a.freq is None + + @pytest.mark.parametrize( + "obj", + [ + pd.Timestamp("2021-01-01"), + pd.Timestamp("2021-01-01").to_datetime64(), + pd.Timestamp("2021-01-01").to_pydatetime(), + ], + ) + def test_setitem_objects(self, obj): + # make sure we accept datetime64 and datetime in addition to Timestamp + dti = pd.date_range("2000", periods=2, freq="D") + arr = dti._data + + arr[0] = obj + assert arr[0] == obj + + def test_repeat_preserves_tz(self): + dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central") + arr = dti._data + + repeated = arr.repeat([1, 1]) + + # preserves tz and values, but not freq + expected = DatetimeArray._from_sequence(arr.asi8, dtype=arr.dtype) + tm.assert_equal(repeated, expected) + + def test_value_counts_preserves_tz(self): + dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central") + arr = dti._data.repeat([4, 3]) + + result = arr.value_counts() + + # Note: not tm.assert_index_equal, since `freq`s do not match + assert result.index.equals(dti) + + arr[-2] = pd.NaT + result = arr.value_counts(dropna=False) + expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT], name="count") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method", ["pad", "backfill"]) + def test_fillna_preserves_tz(self, method): + dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central") + arr = DatetimeArray._from_sequence(dti, copy=True) + arr[2] = pd.NaT + + fill_val = dti[1] if method == "pad" else dti[3] + expected = DatetimeArray._from_sequence( + [dti[0], dti[1], fill_val, dti[3], dti[4]], + dtype=DatetimeTZDtype(tz="US/Central"), + ) + + result = arr._pad_or_backfill(method=method) + tm.assert_extension_array_equal(result, expected) + + # assert that arr and dti were not modified in-place + assert arr[2] is pd.NaT + assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central") + + def test_fillna_2d(self): + dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") + dta = dti._data.reshape(3, 2).copy() + dta[0, 1] = pd.NaT + dta[1, 0] = pd.NaT + + res1 = dta._pad_or_backfill(method="pad") + expected1 = dta.copy() + expected1[1, 0] = dta[0, 0] + tm.assert_extension_array_equal(res1, expected1) + + res2 = dta._pad_or_backfill(method="backfill") + expected2 = dta.copy() + expected2 = dta.copy() + expected2[1, 0] = dta[2, 0] + expected2[0, 
1] = dta[1, 1] + tm.assert_extension_array_equal(res2, expected2) + + # with different ordering for underlying ndarray; behavior should + # be unchanged + dta2 = dta._from_backing_data(dta._ndarray.copy(order="F")) + assert dta2._ndarray.flags["F_CONTIGUOUS"] + assert not dta2._ndarray.flags["C_CONTIGUOUS"] + tm.assert_extension_array_equal(dta, dta2) + + res3 = dta2._pad_or_backfill(method="pad") + tm.assert_extension_array_equal(res3, expected1) + + res4 = dta2._pad_or_backfill(method="backfill") + tm.assert_extension_array_equal(res4, expected2) + + # test the DataFrame method while we're here + df = pd.DataFrame(dta) + res = df.ffill() + expected = pd.DataFrame(expected1) + tm.assert_frame_equal(res, expected) + + res = df.bfill() + expected = pd.DataFrame(expected2) + tm.assert_frame_equal(res, expected) + + def test_array_interface_tz(self): + tz = "US/Central" + data = pd.date_range("2017", periods=2, tz=tz)._data + result = np.asarray(data) + + expected = np.array( + [ + pd.Timestamp("2017-01-01T00:00:00", tz=tz), + pd.Timestamp("2017-01-02T00:00:00", tz=tz), + ], + dtype=object, + ) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype="M8[ns]") + + expected = np.array( + ["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]" + ) + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self): + data = pd.date_range("2017", periods=2)._data + expected = np.array( + ["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]" + ) + + result = np.asarray(data) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + expected = np.array( + [pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")], + dtype=object, + ) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_different_tz(self, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = pd.DatetimeIndex(data, freq="D")._data.tz_localize("Asia/Tokyo") + if index: + arr = pd.Index(arr) + + expected = arr.searchsorted(arr[2]) + result = arr.searchsorted(arr[2].tz_convert("UTC")) + assert result == expected + + expected = arr.searchsorted(arr[2:6]) + result = arr.searchsorted(arr[2:6].tz_convert("UTC")) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_tzawareness_compat(self, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = pd.DatetimeIndex(data, freq="D")._data + if index: + arr = pd.Index(arr) + + mismatch = arr.tz_localize("Asia/Tokyo") + + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + arr.searchsorted(mismatch[0]) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(mismatch) + + with pytest.raises(TypeError, match=msg): + mismatch.searchsorted(arr[0]) + with pytest.raises(TypeError, match=msg): + mismatch.searchsorted(arr) + + @pytest.mark.parametrize( + "other", + [ + 1, + np.int64(1), + 1.0, + np.timedelta64("NaT"), + pd.Timedelta(days=2), + "invalid", + np.arange(10, dtype="i8") * 24 * 3600 * 10**9, + np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10**9, + pd.Timestamp("2021-01-01").to_period("D"), + ], + ) + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_invalid_types(self, other, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 
10**9 + arr = pd.DatetimeIndex(data, freq="D")._data + if index: + arr = pd.Index(arr) + + msg = "|".join( + [ + "searchsorted requires compatible dtype or scalar", + "value should be a 'Timestamp', 'NaT', or array of those. Got", + ] + ) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(other) + + def test_shift_fill_value(self): + dti = pd.date_range("2016-01-01", periods=3) + + dta = dti._data + expected = DatetimeArray._from_sequence(np.roll(dta._ndarray, 1)) + + fv = dta[-1] + for fill_value in [fv, fv.to_pydatetime(), fv.to_datetime64()]: + result = dta.shift(1, fill_value=fill_value) + tm.assert_datetime_array_equal(result, expected) + + dta = dta.tz_localize("UTC") + expected = expected.tz_localize("UTC") + fv = dta[-1] + for fill_value in [fv, fv.to_pydatetime()]: + result = dta.shift(1, fill_value=fill_value) + tm.assert_datetime_array_equal(result, expected) + + def test_shift_value_tzawareness_mismatch(self): + dti = pd.date_range("2016-01-01", periods=3) + + dta = dti._data + + fv = dta[-1].tz_localize("UTC") + for invalid in [fv, fv.to_pydatetime()]: + with pytest.raises(TypeError, match="Cannot compare"): + dta.shift(1, fill_value=invalid) + + dta = dta.tz_localize("UTC") + fv = dta[-1].tz_localize(None) + for invalid in [fv, fv.to_pydatetime(), fv.to_datetime64()]: + with pytest.raises(TypeError, match="Cannot compare"): + dta.shift(1, fill_value=invalid) + + def test_shift_requires_tzmatch(self): + # pre-2.0 we required exact tz match, in 2.0 we require just + # matching tzawareness + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + dta = dti._data + + fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific") + + result = dta.shift(1, fill_value=fill_value) + expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC")) + tm.assert_equal(result, expected) + + def test_tz_localize_t2d(self): + dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific") + dta = dti._data.reshape(3, 4) + result = dta.tz_localize(None) + + expected = dta.ravel().tz_localize(None).reshape(dta.shape) + tm.assert_datetime_array_equal(result, expected) + + roundtrip = expected.tz_localize("US/Pacific") + tm.assert_datetime_array_equal(roundtrip, dta) + + easts = ["US/Eastern", "dateutil/US/Eastern"] + if ZoneInfo is not None: + try: + tz = ZoneInfo("US/Eastern") + except KeyError: + # no tzdata + pass + else: + # Argument 1 to "append" of "list" has incompatible type "ZoneInfo"; + # expected "str" + easts.append(tz) # type: ignore[arg-type] + + @pytest.mark.parametrize("tz", easts) + def test_iter_zoneinfo_fold(self, tz): + # GH#49684 + utc_vals = np.array( + [1320552000, 1320555600, 1320559200, 1320562800], dtype=np.int64 + ) + utc_vals *= 1_000_000_000 + + dta = DatetimeArray._from_sequence(utc_vals).tz_localize("UTC").tz_convert(tz) + + left = dta[2] + right = list(dta)[2] + assert str(left) == str(right) + # previously there was a bug where with non-pytz right would be + # Timestamp('2011-11-06 01:00:00-0400', tz='US/Eastern') + # while left would be + # Timestamp('2011-11-06 01:00:00-0500', tz='US/Eastern') + # The .value's would match (so they would compare as equal), + # but the folds would not + assert left.utcoffset() == right.utcoffset() + + # The same bug in ints_to_pydatetime affected .astype, so we test + # that here. 
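+ # astype(object) also converts via ints_to_pydatetime, so the resulting Timestamps should carry the correct fold/utcoffset as well.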
+ right2 = dta.astype(object)[2] + assert str(left) == str(right2) + assert left.utcoffset() == right2.utcoffset() + + @pytest.mark.parametrize( + "freq, freq_depr", + [ + ("2ME", "2M"), + ("2SME", "2SM"), + ("2SME", "2sm"), + ("2QE", "2Q"), + ("2QE-SEP", "2Q-SEP"), + ("1YE", "1Y"), + ("2YE-MAR", "2Y-MAR"), + ("1YE", "1A"), + ("2YE-MAR", "2A-MAR"), + ("2ME", "2m"), + ("2QE-SEP", "2q-sep"), + ("2YE-MAR", "2a-mar"), + ("2YE", "2y"), + ], + ) + def test_date_range_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr): + # GH#9586, GH#54275 + depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed " + f"in a future version, please use '{freq[1:]}' instead." + + expected = pd.date_range("1/1/2000", periods=4, freq=freq) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = pd.date_range("1/1/2000", periods=4, freq=freq_depr) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("freq_depr", ["2H", "2CBH", "2MIN", "2S", "2mS", "2Us"]) + def test_date_range_uppercase_frequency_deprecated(self, freq_depr): + # GH#9586, GH#54939 + depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a " + f"future version. Please use '{freq_depr.lower()[1:]}' instead." + + expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.lower()) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = pd.date_range("1/1/2000", periods=4, freq=freq_depr) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "freq_depr", + [ + "2ye-mar", + "2ys", + "2qe", + "2qs-feb", + "2bqs", + "2sms", + "2bms", + "2cbme", + "2me", + "2w", + ], + ) + def test_date_range_lowercase_frequency_deprecated(self, freq_depr): + # GH#9586, GH#54939 + depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a " + f"future version, please use '{freq_depr.upper()[1:]}' instead." 
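+ # Note: the second f-string above is a standalone expression, so depr_msg holds only the first half of the message; that partial string still matches the emitted FutureWarning.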
+ + expected = pd.date_range("1/1/2000", periods=4, freq=freq_depr.upper()) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = pd.date_range("1/1/2000", periods=4, freq=freq_depr) + tm.assert_index_equal(result, expected) + + +def test_factorize_sort_without_freq(): + dta = DatetimeArray._from_sequence([0, 2, 1], dtype="M8[ns]") + + msg = r"call pd.factorize\(obj, sort=True\) instead" + with pytest.raises(NotImplementedError, match=msg): + dta.factorize(sort=True) + + # Do TimedeltaArray while we're here + tda = dta - dta[0] + with pytest.raises(NotImplementedError, match=msg): + tda.factorize(sort=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_timedeltas.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_timedeltas.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f15467feb144ee21883a0a2a777e3b5e0cdf42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/test_timedeltas.py @@ -0,0 +1,313 @@ +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import Timedelta +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit(self, request): + return request.param + + @pytest.fixture + def tda(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]") + return TimedeltaArray._simple_new(arr, dtype=arr.dtype) + + def test_non_nano(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]") + tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype) + + assert tda.dtype == arr.dtype + assert tda[0].unit == unit + + def test_as_unit_raises(self, tda): + # GH#50616 + with pytest.raises(ValueError, match="Supported units"): + tda.as_unit("D") + + tdi = pd.Index(tda) + with pytest.raises(ValueError, match="Supported units"): + tdi.as_unit("D") + + @pytest.mark.parametrize("field", TimedeltaArray._field_ops) + def test_fields(self, tda, field): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = getattr(tda, field) + expected = getattr(tda_nano, field) + tm.assert_numpy_array_equal(result, expected) + + def test_to_pytimedelta(self, tda): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = tda.to_pytimedelta() + expected = tda_nano.to_pytimedelta() + tm.assert_numpy_array_equal(result, expected) + + def test_total_seconds(self, unit, tda): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = tda.total_seconds() + expected = tda_nano.total_seconds() + tm.assert_numpy_array_equal(result, expected) + + def test_timedelta_array_total_seconds(self): + # GH34290 + expected = Timedelta("2 min").total_seconds() + + result = pd.array([Timedelta("2 min")]).total_seconds()[0] + assert result == expected + + def test_total_seconds_nanoseconds(self): + # issue #48521 + start_time = pd.Series(["2145-11-02 06:00:00"]).astype("datetime64[ns]") + end_time = pd.Series(["2145-11-02 07:06:00"]).astype("datetime64[ns]") + expected = (end_time - start_time).values / np.timedelta64(1, "s") + result = (end_time - start_time).dt.total_seconds().values + assert result == expected + + @pytest.mark.parametrize( + "nat", [np.datetime64("NaT", "ns"), np.datetime64("NaT", "us")] + ) + def 
test_add_nat_datetimelike_scalar(self, nat, tda): + result = tda + nat + assert isinstance(result, DatetimeArray) + assert result._creso == tda._creso + assert result.isna().all() + + result = nat + tda + assert isinstance(result, DatetimeArray) + assert result._creso == tda._creso + assert result.isna().all() + + def test_add_pdnat(self, tda): + result = tda + pd.NaT + assert isinstance(result, TimedeltaArray) + assert result._creso == tda._creso + assert result.isna().all() + + result = pd.NaT + tda + assert isinstance(result, TimedeltaArray) + assert result._creso == tda._creso + assert result.isna().all() + + # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert + # or tz_localize with non-nano; implement tests specific to that. + def test_add_datetimelike_scalar(self, tda, tz_naive_fixture): + ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture).as_unit("ns") + + expected = tda.as_unit("ns") + ts + res = tda + ts + tm.assert_extension_array_equal(res, expected) + res = ts + tda + tm.assert_extension_array_equal(res, expected) + + ts += Timedelta(1) # case where we can't cast losslessly + + exp_values = tda._ndarray + ts.asm8 + expected = ( + DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype) + .tz_localize("UTC") + .tz_convert(ts.tz) + ) + + result = tda + ts + tm.assert_extension_array_equal(result, expected) + + result = ts + tda + tm.assert_extension_array_equal(result, expected) + + def test_mul_scalar(self, tda): + other = 2 + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_mul_listlike(self, tda): + other = np.arange(len(tda)) + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_mul_listlike_object(self, tda): + other = np.arange(len(tda)) + result = tda * other.astype(object) + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_numeric_scalar(self, tda): + other = 2 + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_td_scalar(self, tda): + other = timedelta(seconds=1) + result = tda / other + expected = tda._ndarray / np.timedelta64(1, "s") + tm.assert_numpy_array_equal(result, expected) + + def test_div_numeric_array(self, tda): + other = np.arange(len(tda)) + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_td_array(self, tda): + other = tda._ndarray + tda._ndarray[-1] + result = tda / other + expected = tda._ndarray / other + tm.assert_numpy_array_equal(result, expected) + + def test_add_timedeltaarraylike(self, tda): + tda_nano = tda.astype("m8[ns]") + + expected = tda_nano * 2 + res = tda_nano + tda + tm.assert_extension_array_equal(res, expected) + res = tda + tda_nano + tm.assert_extension_array_equal(res, expected) + + expected = tda_nano * 0 + res = tda - tda_nano + tm.assert_extension_array_equal(res, expected) + + res = tda_nano - tda + tm.assert_extension_array_equal(res, expected) + + +class 
TestTimedeltaArray: + @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) + def test_astype_int(self, dtype): + arr = TimedeltaArray._from_sequence( + [Timedelta("1h"), Timedelta("2h")], dtype="m8[ns]" + ) + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + def test_setitem_clears_freq(self): + a = pd.timedelta_range("1h", periods=2, freq="h")._data + a[0] = Timedelta("1h") + assert a.freq is None + + @pytest.mark.parametrize( + "obj", + [ + Timedelta(seconds=1), + Timedelta(seconds=1).to_timedelta64(), + Timedelta(seconds=1).to_pytimedelta(), + ], + ) + def test_setitem_objects(self, obj): + # make sure we accept timedelta64 and timedelta in addition to Timedelta + tdi = pd.timedelta_range("2 Days", periods=4, freq="h") + arr = tdi._data + + arr[0] = obj + assert arr[0] == Timedelta(seconds=1) + + @pytest.mark.parametrize( + "other", + [ + 1, + np.int64(1), + 1.0, + np.datetime64("NaT"), + pd.Timestamp("2021-01-01"), + "invalid", + np.arange(10, dtype="i8") * 24 * 3600 * 10**9, + (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"), + pd.Timestamp("2021-01-01").to_period("D"), + ], + ) + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_invalid_types(self, other, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = pd.TimedeltaIndex(data, freq="D")._data + if index: + arr = pd.Index(arr) + + msg = "|".join( + [ + "searchsorted requires compatible dtype or scalar", + "value should be a 'Timedelta', 'NaT', or array of those. Got", + ] + ) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(other) + + +class TestUnaryOps: + def test_abs(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray._from_sequence(vals) + + evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + expected = TimedeltaArray._from_sequence(evals) + + result = abs(arr) + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.abs(arr) + tm.assert_timedelta_array_equal(result2, expected) + + def test_pos(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray._from_sequence(vals) + + result = +arr + tm.assert_timedelta_array_equal(result, arr) + assert not tm.shares_memory(result, arr) + + result2 = np.positive(arr) + tm.assert_timedelta_array_equal(result2, arr) + assert not tm.shares_memory(result2, arr) + + def test_neg(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray._from_sequence(vals) + + evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]") + expected = TimedeltaArray._from_sequence(evals) + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.negative(arr) + tm.assert_timedelta_array_equal(result2, expected) + + def test_neg_freq(self): + tdi = pd.timedelta_range("2 Days", periods=4, freq="h") + arr = tdi._data + + expected = -tdi._data + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.negative(arr) + tm.assert_timedelta_array_equal(result2, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b35ad0a968922b6071a981e4d4629e7fc9adacdd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6060f1f2330d36d8bc1559bdecea84c8746c5111 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_localization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_localization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bca7cfddd8058837ac0169ae90af4283b5495211 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/__pycache__/test_localization.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_config.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..f49ae942423992f6dbb209e8f931f091e900ba12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_config.py @@ -0,0 +1,437 @@ +import pytest + +from pandas._config import config as cf +from pandas._config.config import OptionError + +import pandas as pd +import pandas._testing as tm + + +class TestConfig: + @pytest.fixture(autouse=True) + def clean_config(self, monkeypatch): + with monkeypatch.context() as m: + m.setattr(cf, "_global_config", {}) + m.setattr(cf, "options", cf.DictWrapper(cf._global_config)) + m.setattr(cf, "_deprecated_options", {}) + m.setattr(cf, "_registered_options", {}) + + # Our test fixture in conftest.py sets "chained_assignment" + # to "raise" only after all test methods have been setup. + # However, after this setup, there is no longer any + # "chained_assignment" option, so re-register it. 
+ cf.register_option("chained_assignment", "raise") + yield + + def test_api(self): + # the pandas object exposes the user API + assert hasattr(pd, "get_option") + assert hasattr(pd, "set_option") + assert hasattr(pd, "reset_option") + assert hasattr(pd, "describe_option") + + def test_is_one_of_factory(self): + v = cf.is_one_of_factory([None, 12]) + + v(12) + v(None) + msg = r"Value must be one of None\|12" + with pytest.raises(ValueError, match=msg): + v(1.1) + + def test_register_option(self): + cf.register_option("a", 1, "doc") + + # can't register an already registered option + msg = "Option 'a' has already been registered" + with pytest.raises(OptionError, match=msg): + cf.register_option("a", 1, "doc") + + # can't register an already registered option + msg = "Path prefix to option 'a' is already an option" + with pytest.raises(OptionError, match=msg): + cf.register_option("a.b.c.d1", 1, "doc") + with pytest.raises(OptionError, match=msg): + cf.register_option("a.b.c.d2", 1, "doc") + + # no python keywords + msg = "for is a python keyword" + with pytest.raises(ValueError, match=msg): + cf.register_option("for", 0) + with pytest.raises(ValueError, match=msg): + cf.register_option("a.for.b", 0) + # must be valid identifier (ensure attribute access works) + msg = "oh my goddess! is not a valid identifier" + with pytest.raises(ValueError, match=msg): + cf.register_option("Oh my Goddess!", 0) + + # we can register options several levels deep + # without predefining the intermediate steps + # and we can define differently named options + # in the same namespace + cf.register_option("k.b.c.d1", 1, "doc") + cf.register_option("k.b.c.d2", 1, "doc") + + def test_describe_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b", 1, "doc2") + cf.deprecate_option("b") + + cf.register_option("c.d.e1", 1, "doc3") + cf.register_option("c.d.e2", 1, "doc4") + cf.register_option("f", 1) + cf.register_option("g.h", 1) + cf.register_option("k", 2) + cf.deprecate_option("g.h", rkey="k") + cf.register_option("l", "foo") + + # non-existent keys raise KeyError + msg = r"No such keys\(s\)" + with pytest.raises(OptionError, match=msg): + cf.describe_option("no.such.key") + + # we can get the description for any key we registered + assert "doc" in cf.describe_option("a", _print_desc=False) + assert "doc2" in cf.describe_option("b", _print_desc=False) + assert "precated" in cf.describe_option("b", _print_desc=False) + assert "doc3" in cf.describe_option("c.d.e1", _print_desc=False) + assert "doc4" in cf.describe_option("c.d.e2", _print_desc=False) + + # if no doc is specified we get a default message + # saying "description not available" + assert "available" in cf.describe_option("f", _print_desc=False) + assert "available" in cf.describe_option("g.h", _print_desc=False) + assert "precated" in cf.describe_option("g.h", _print_desc=False) + assert "k" in cf.describe_option("g.h", _print_desc=False) + + # default is reported + assert "foo" in cf.describe_option("l", _print_desc=False) + # current value is reported + assert "bar" not in cf.describe_option("l", _print_desc=False) + cf.set_option("l", "bar") + assert "bar" in cf.describe_option("l", _print_desc=False) + + def test_case_insensitive(self): + cf.register_option("KanBAN", 1, "doc") + + assert "doc" in cf.describe_option("kanbaN", _print_desc=False) + assert cf.get_option("kanBaN") == 1 + cf.set_option("KanBan", 2) + assert cf.get_option("kAnBaN") == 2 + + # gets of non-existent keys fail + msg = r"No such keys\(s\): 'no_such_option'" 
+ with pytest.raises(OptionError, match=msg): + cf.get_option("no_such_option") + cf.deprecate_option("KanBan") + + assert cf._is_deprecated("kAnBaN") + + def test_get_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + # gets of existing keys succeed + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + # gets of non-existent keys fail + msg = r"No such keys\(s\): 'no_such_option'" + with pytest.raises(OptionError, match=msg): + cf.get_option("no_such_option") + + def test_set_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + cf.set_option("b.b", 1.1) + + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + assert cf.get_option("b.b") == 1.1 + + msg = r"No such keys\(s\): 'no.such.key'" + with pytest.raises(OptionError, match=msg): + cf.set_option("no.such.key", None) + + def test_set_option_empty_args(self): + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option() + + def test_set_option_uneven_args(self): + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option("a.b", 2, "b.c") + + def test_set_option_invalid_single_argument_type(self): + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option(2) + + def test_set_option_multiple(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + cf.set_option("a", "2", "b.c", None, "b.b", 10.0) + + assert cf.get_option("a") == "2" + assert cf.get_option("b.c") is None + assert cf.get_option("b.b") == 10.0 + + def test_validation(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("d", 1, "doc", validator=cf.is_nonnegative_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_text) + + msg = "Value must have type ''" + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d2", "NO", "doc", validator=cf.is_int) + + cf.set_option("a", 2) # int is_int + cf.set_option("b.c", "wurld") # str is_str + cf.set_option("d", 2) + cf.set_option("d", None) # non-negative int can be None + + # None not is_int + with pytest.raises(ValueError, match=msg): + cf.set_option("a", None) + with pytest.raises(ValueError, match=msg): + cf.set_option("a", "ab") + + msg = "Value must be a nonnegative integer or None" + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d3", "NO", "doc", validator=cf.is_nonnegative_int) + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d3", -2, "doc", validator=cf.is_nonnegative_int) + + msg = r"Value must be an instance of \|" + with pytest.raises(ValueError, match=msg): + cf.set_option("b.c", 1) + + validator = cf.is_one_of_factory([None, cf.is_callable]) + cf.register_option("b", lambda: None, "doc", validator=validator) + # pylint: disable-next=consider-using-f-string + cf.set_option("b", "%.1f".format) # Formatter is 
callable + cf.set_option("b", None) # Formatter is none (default) + with pytest.raises(ValueError, match="Value must be a callable"): + cf.set_option("b", "%.1f") + + def test_reset_option(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str) + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + + cf.reset_option("a") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "wurld" + cf.reset_option("b.c") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + def test_reset_option_all(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str) + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + + cf.reset_option("all") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + def test_deprecate_option(self): + # we can deprecate non-existent options + cf.deprecate_option("foo") + + assert cf._is_deprecated("foo") + with tm.assert_produces_warning(FutureWarning, match="deprecated"): + with pytest.raises(KeyError, match="No such keys.s.: 'foo'"): + cf.get_option("foo") + + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("foo", "hullo", "doc2") + + cf.deprecate_option("a", removal_ver="nifty_ver") + with tm.assert_produces_warning(FutureWarning, match="eprecated.*nifty_ver"): + cf.get_option("a") + + msg = "Option 'a' has already been defined as deprecated" + with pytest.raises(OptionError, match=msg): + cf.deprecate_option("a") + + cf.deprecate_option("b.c", "zounds!") + with tm.assert_produces_warning(FutureWarning, match="zounds!"): + cf.get_option("b.c") + + # test rerouting keys + cf.register_option("d.a", "foo", "doc2") + cf.register_option("d.dep", "bar", "doc2") + assert cf.get_option("d.a") == "foo" + assert cf.get_option("d.dep") == "bar" + + cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + assert cf.get_option("d.dep") == "foo" + + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + cf.set_option("d.dep", "baz") # should overwrite "d.a" + + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + assert cf.get_option("d.dep") == "baz" + + def test_config_prefix(self): + with cf.config_prefix("base"): + cf.register_option("a", 1, "doc1") + cf.register_option("b", 2, "doc2") + assert cf.get_option("a") == 1 + assert cf.get_option("b") == 2 + + cf.set_option("a", 3) + cf.set_option("b", 4) + assert cf.get_option("a") == 3 + assert cf.get_option("b") == 4 + + assert cf.get_option("base.a") == 3 + assert cf.get_option("base.b") == 4 + assert "doc1" in cf.describe_option("base.a", _print_desc=False) + assert "doc2" in cf.describe_option("base.b", _print_desc=False) + + cf.reset_option("base.a") + cf.reset_option("base.b") + + with cf.config_prefix("base"): + assert cf.get_option("a") == 1 + assert cf.get_option("b") == 2 + + def test_callback(self): + k = [None] + v = [None] + + def callback(key): + k.append(key) + v.append(cf.get_option(key)) + + cf.register_option("d.a", "foo", cb=callback) + 
cf.register_option("d.b", "foo", cb=callback) + + del k[-1], v[-1] + cf.set_option("d.a", "fooz") + assert k[-1] == "d.a" + assert v[-1] == "fooz" + + del k[-1], v[-1] + cf.set_option("d.b", "boo") + assert k[-1] == "d.b" + assert v[-1] == "boo" + + del k[-1], v[-1] + cf.reset_option("d.b") + assert k[-1] == "d.b" + + def test_set_ContextManager(self): + def eq(val): + assert cf.get_option("a") == val + + cf.register_option("a", 0) + eq(0) + with cf.option_context("a", 15): + eq(15) + with cf.option_context("a", 25): + eq(25) + eq(15) + eq(0) + + cf.set_option("a", 17) + eq(17) + + # Test that option_context can be used as a decorator too (#34253). + @cf.option_context("a", 123) + def f(): + eq(123) + + f() + + def test_attribute_access(self): + holder = [] + + def f3(key): + holder.append(True) + + cf.register_option("a", 0) + cf.register_option("c", 0, cb=f3) + options = cf.options + + assert options.a == 0 + with cf.option_context("a", 15): + assert options.a == 15 + + options.a = 500 + assert cf.get_option("a") == 500 + + cf.reset_option("a") + assert options.a == cf.get_option("a", 0) + + msg = "You can only set the value of existing options" + with pytest.raises(OptionError, match=msg): + options.b = 1 + with pytest.raises(OptionError, match=msg): + options.display = 1 + + # make sure callback kicks when using this form of setting + options.c = 1 + assert len(holder) == 1 + + def test_option_context_scope(self): + # Ensure that creating a context does not affect the existing + # environment as it is supposed to be used with the `with` statement. + # See https://github.com/pandas-dev/pandas/issues/8514 + + original_value = 60 + context_value = 10 + option_name = "a" + + cf.register_option(option_name, original_value) + + # Ensure creating contexts didn't affect the current context. + ctx = cf.option_context(option_name, context_value) + assert cf.get_option(option_name) == original_value + + # Ensure the correct value is available inside the context. + with ctx: + assert cf.get_option(option_name) == context_value + + # Ensure the current context is reset + assert cf.get_option(option_name) == original_value + + def test_dictwrapper_getattr(self): + options = cf.options + # GH 19789 + with pytest.raises(OptionError, match="No such option"): + options.bananas + assert not hasattr(options, "bananas") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_localization.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_localization.py new file mode 100644 index 0000000000000000000000000000000000000000..3907f557d1075536e46d12f219dc9b0c3f3f32c1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/config/test_localization.py @@ -0,0 +1,156 @@ +import codecs +import locale +import os + +import pytest + +from pandas._config.localization import ( + can_set_locale, + get_locales, + set_locale, +) + +from pandas.compat import ISMUSL + +import pandas as pd + +_all_locales = get_locales() +_current_locale = locale.setlocale(locale.LC_ALL) # getlocale() is wrong, see GH#46595 + +# Don't run any of these tests if we have no locales. +pytestmark = pytest.mark.skipif(not _all_locales, reason="Need locales") + +_skip_if_only_one_locale = pytest.mark.skipif( + len(_all_locales) <= 1, reason="Need multiple locales for meaningful test" +) + + +def _get_current_locale(lc_var: int = locale.LC_ALL) -> str: + # getlocale is not always compliant with setlocale, use setlocale. 
GH#46595 + return locale.setlocale(lc_var) + + +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_current_locale(lc_var): + # Can set the current locale + before_locale = _get_current_locale(lc_var) + assert can_set_locale(before_locale, lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_locale_valid_set(lc_var): + # Can set the default locale. + before_locale = _get_current_locale(lc_var) + assert can_set_locale("", lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize( + "lc_var", + ( + locale.LC_ALL, + locale.LC_CTYPE, + pytest.param( + locale.LC_TIME, + marks=pytest.mark.skipif( + ISMUSL, reason="MUSL allows setting invalid LC_TIME." + ), + ), + ), +) +def test_can_set_locale_invalid_set(lc_var): + # Cannot set an invalid locale. + before_locale = _get_current_locale(lc_var) + assert not can_set_locale("non-existent_locale", lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize( + "lang,enc", + [ + ("it_CH", "UTF-8"), + ("en_US", "ascii"), + ("zh_CN", "GB2312"), + ("it_IT", "ISO-8859-1"), + ], +) +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_locale_no_leak(lang, enc, lc_var): + # Test that can_set_locale does not leak even when returning False. See GH#46595 + before_locale = _get_current_locale(lc_var) + can_set_locale((lang, enc), locale.LC_ALL) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +def test_can_set_locale_invalid_get(monkeypatch): + # see GH#22129 + # In some cases, an invalid locale can be set, + # but a subsequent getlocale() raises a ValueError. + + def mock_get_locale(): + raise ValueError() + + with monkeypatch.context() as m: + m.setattr(locale, "getlocale", mock_get_locale) + assert not can_set_locale("") + + +def test_get_locales_at_least_one(): + # see GH#9744 + assert len(_all_locales) > 0 + + +@_skip_if_only_one_locale +def test_get_locales_prefix(): + first_locale = _all_locales[0] + assert len(get_locales(prefix=first_locale[:2])) > 0 + + +@_skip_if_only_one_locale +@pytest.mark.parametrize( + "lang,enc", + [ + ("it_CH", "UTF-8"), + ("en_US", "ascii"), + ("zh_CN", "GB2312"), + ("it_IT", "ISO-8859-1"), + ], +) +def test_set_locale(lang, enc): + before_locale = _get_current_locale() + + enc = codecs.lookup(enc).name + new_locale = lang, enc + + if not can_set_locale(new_locale): + msg = "unsupported locale setting" + + with pytest.raises(locale.Error, match=msg): + with set_locale(new_locale): + pass + else: + with set_locale(new_locale) as normalized_locale: + new_lang, new_enc = normalized_locale.split(".") + new_enc = codecs.lookup(enc).name + + normalized_locale = new_lang, new_enc + assert normalized_locale == new_locale + + # Once we exit the "with" statement, locale should be back to what it was. 
+ after_locale = _get_current_locale() + assert before_locale == after_locale + + +def test_encoding_detected(): + system_locale = os.environ.get("LC_ALL") + system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8" + + assert ( + codecs.lookup(pd.options.display.encoding).name + == codecs.lookup(system_encoding).name + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c442006c19ef265c25d778234fae1c7b0427db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295854e6cc754409525a87cd47a5c7dd97de9b5e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6629e1f65a09a2de2d3d9adf0a064fbddba45b54 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd305610c8dddbdf738dea1e8821e29bab9fe505 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_api.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..1251a6ae97a1cb9304de036dba252de54e7fb10b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_api.py @@ -0,0 +1,86 @@ +""" +Tests for the pseudo-public API implemented in internals/api.py and exposed +in core.internals +""" + +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import internals +from pandas.core.internals import api + + +def test_internals_api(): + assert internals.make_block is api.make_block + + +def test_namespace(): + # SUBJECT TO CHANGE + + modules = [ + "blocks", + "concat", + "managers", + "construction", + "array_manager", + "base", + "api", + "ops", + ] + expected = [ + "make_block", + "DataManager", + "ArrayManager", + "BlockManager", + "SingleDataManager", + "SingleBlockManager", + "SingleArrayManager", 
+ "concatenate_managers", + ] + + result = [x for x in dir(internals) if not x.startswith("__")] + assert set(result) == set(expected + modules) + + +@pytest.mark.parametrize( + "name", + [ + "NumericBlock", + "ObjectBlock", + "Block", + "ExtensionBlock", + "DatetimeTZBlock", + ], +) +def test_deprecations(name): + # GH#55139 + msg = f"{name} is deprecated.* Use public APIs instead" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + getattr(internals, name) + + if name not in ["NumericBlock", "ObjectBlock"]: + # NumericBlock and ObjectBlock are not in the internals.api namespace + with tm.assert_produces_warning(DeprecationWarning, match=msg): + getattr(api, name) + + +def test_make_block_2d_with_dti(): + # GH#41168 + dti = pd.date_range("2012", periods=3, tz="UTC") + blk = api.make_block(dti, placement=[0]) + + assert blk.shape == (1, 3) + assert blk.values.shape == (1, 3) + + +def test_create_block_manager_from_blocks_deprecated(): + # GH#33892 + # If they must, downstream packages should get this from internals.api, + # not internals. + msg = ( + "create_block_manager_from_blocks is deprecated and will be " + "removed in a future version. Use public APIs instead" + ) + with tm.assert_produces_warning(DeprecationWarning, match=msg): + internals.create_block_manager_from_blocks diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..ce88bae6e02f2892d7c8e4ee8f6315904bbbd65a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_internals.py @@ -0,0 +1,1422 @@ +from datetime import ( + date, + datetime, +) +import itertools +import re + +import numpy as np +import pytest + +from pandas._libs.internals import BlockPlacement +from pandas.compat import IS64 +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_scalar + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + Series, + Timedelta, + Timestamp, + period_range, +) +import pandas._testing as tm +import pandas.core.algorithms as algos +from pandas.core.arrays import ( + DatetimeArray, + SparseArray, + TimedeltaArray, +) +from pandas.core.internals import ( + BlockManager, + SingleBlockManager, + make_block, +) +from pandas.core.internals.blocks import ( + ensure_block_shape, + maybe_coerce_values, + new_block, +) + +# this file contains BlockManager specific tests +# TODO(ArrayManager) factor out interleave_dtype tests +pytestmark = td.skip_array_manager_invalid_test + + +@pytest.fixture(params=[new_block, make_block]) +def block_maker(request): + """ + Fixture to test both the internal new_block and pseudo-public make_block. 
+ """ + return request.param + + +@pytest.fixture +def mgr(): + return create_mgr( + "a: f8; b: object; c: f8; d: object; e: f8;" + "f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;" + "k: M8[ns, US/Eastern]; l: M8[ns, CET];" + ) + + +def assert_block_equal(left, right): + tm.assert_numpy_array_equal(left.values, right.values) + assert left.dtype == right.dtype + assert isinstance(left.mgr_locs, BlockPlacement) + assert isinstance(right.mgr_locs, BlockPlacement) + tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array) + + +def get_numeric_mat(shape): + arr = np.arange(shape[0]) + return np.lib.stride_tricks.as_strided( + x=arr, shape=shape, strides=(arr.itemsize,) + (0,) * (len(shape) - 1) + ).copy() + + +N = 10 + + +def create_block(typestr, placement, item_shape=None, num_offset=0, maker=new_block): + """ + Supported typestr: + + * float, f8, f4, f2 + * int, i8, i4, i2, i1 + * uint, u8, u4, u2, u1 + * complex, c16, c8 + * bool + * object, string, O + * datetime, dt, M8[ns], M8[ns, tz] + * timedelta, td, m8[ns] + * sparse (SparseArray with fill_value=0.0) + * sparse_na (SparseArray with fill_value=np.nan) + * category, category2 + + """ + placement = BlockPlacement(placement) + num_items = len(placement) + + if item_shape is None: + item_shape = (N,) + + shape = (num_items,) + item_shape + + mat = get_numeric_mat(shape) + + if typestr in ( + "float", + "f8", + "f4", + "f2", + "int", + "i8", + "i4", + "i2", + "i1", + "uint", + "u8", + "u4", + "u2", + "u1", + ): + values = mat.astype(typestr) + num_offset + elif typestr in ("complex", "c16", "c8"): + values = 1.0j * (mat.astype(typestr) + num_offset) + elif typestr in ("object", "string", "O"): + values = np.reshape([f"A{i:d}" for i in mat.ravel() + num_offset], shape) + elif typestr in ("b", "bool"): + values = np.ones(shape, dtype=np.bool_) + elif typestr in ("datetime", "dt", "M8[ns]"): + values = (mat * 1e9).astype("M8[ns]") + elif typestr.startswith("M8[ns"): + # datetime with tz + m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr) + assert m is not None, f"incompatible typestr -> {typestr}" + tz = m.groups()[0] + assert num_items == 1, "must have only 1 num items for a tz-aware" + values = DatetimeIndex(np.arange(N) * 10**9, tz=tz)._data + values = ensure_block_shape(values, ndim=len(shape)) + elif typestr in ("timedelta", "td", "m8[ns]"): + values = (mat * 1).astype("m8[ns]") + elif typestr in ("category",): + values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4]) + elif typestr in ("category2",): + values = Categorical(["a", "a", "a", "a", "b", "b", "c", "c", "c", "d"]) + elif typestr in ("sparse", "sparse_na"): + if shape[-1] != 10: + # We also are implicitly assuming this in the category cases above + raise NotImplementedError + + assert all(s == 1 for s in shape[:-1]) + if typestr.endswith("_na"): + fill_value = np.nan + else: + fill_value = 0.0 + values = SparseArray( + [fill_value, fill_value, 1, 2, 3, fill_value, 4, 5, fill_value, 6], + fill_value=fill_value, + ) + arr = values.sp_values.view() + arr += num_offset - 1 + else: + raise ValueError(f'Unsupported typestr: "{typestr}"') + + values = maybe_coerce_values(values) + return maker(values, placement=placement, ndim=len(shape)) + + +def create_single_mgr(typestr, num_rows=None): + if num_rows is None: + num_rows = N + + return SingleBlockManager( + create_block(typestr, placement=slice(0, num_rows), item_shape=()), + Index(np.arange(num_rows)), + ) + + +def create_mgr(descr, item_shape=None): + """ + Construct BlockManager from string 
description. + + String description syntax looks similar to np.matrix initializer. It looks + like this:: + + a,b,c: f8; d,e,f: i8 + + Rules are rather simple: + + * see list of supported datatypes in `create_block` method + * components are semicolon-separated + * each component is `NAME,NAME,NAME: DTYPE_ID` + * whitespace around colons & semicolons are removed + * components with same DTYPE_ID are combined into single block + * to force multiple blocks with same dtype, use '-SUFFIX':: + + 'a:f8-1; b:f8-2; c:f8-foobar' + + """ + if item_shape is None: + item_shape = (N,) + + offset = 0 + mgr_items = [] + block_placements = {} + for d in descr.split(";"): + d = d.strip() + if not len(d): + continue + names, blockstr = d.partition(":")[::2] + blockstr = blockstr.strip() + names = names.strip().split(",") + + mgr_items.extend(names) + placement = list(np.arange(len(names)) + offset) + try: + block_placements[blockstr].extend(placement) + except KeyError: + block_placements[blockstr] = placement + offset += len(names) + + mgr_items = Index(mgr_items) + + blocks = [] + num_offset = 0 + for blockstr, placement in block_placements.items(): + typestr = blockstr.split("-")[0] + blocks.append( + create_block( + typestr, placement, item_shape=item_shape, num_offset=num_offset + ) + ) + num_offset += len(placement) + + sblocks = sorted(blocks, key=lambda b: b.mgr_locs[0]) + return BlockManager( + tuple(sblocks), + [mgr_items] + [Index(np.arange(n)) for n in item_shape], + ) + + +@pytest.fixture +def fblock(): + return create_block("float", [0, 2, 4]) + + +class TestBlock: + def test_constructor(self): + int32block = create_block("i4", [0]) + assert int32block.dtype == np.int32 + + @pytest.mark.parametrize( + "typ, data", + [ + ["float", [0, 2, 4]], + ["complex", [7]], + ["object", [1, 3]], + ["bool", [5]], + ], + ) + def test_pickle(self, typ, data): + blk = create_block(typ, data) + assert_block_equal(tm.round_trip_pickle(blk), blk) + + def test_mgr_locs(self, fblock): + assert isinstance(fblock.mgr_locs, BlockPlacement) + tm.assert_numpy_array_equal( + fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.intp) + ) + + def test_attrs(self, fblock): + assert fblock.shape == fblock.values.shape + assert fblock.dtype == fblock.values.dtype + assert len(fblock) == len(fblock.values) + + def test_copy(self, fblock): + cop = fblock.copy() + assert cop is not fblock + assert_block_equal(fblock, cop) + + def test_delete(self, fblock): + newb = fblock.copy() + locs = newb.mgr_locs + nb = newb.delete(0)[0] + assert newb.mgr_locs is locs + + assert nb is not newb + + tm.assert_numpy_array_equal( + nb.mgr_locs.as_array, np.array([2, 4], dtype=np.intp) + ) + assert not (newb.values[0] == 1).all() + assert (nb.values[0] == 1).all() + + newb = fblock.copy() + locs = newb.mgr_locs + nb = newb.delete(1) + assert len(nb) == 2 + assert newb.mgr_locs is locs + + tm.assert_numpy_array_equal( + nb[0].mgr_locs.as_array, np.array([0], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + nb[1].mgr_locs.as_array, np.array([4], dtype=np.intp) + ) + assert not (newb.values[1] == 2).all() + assert (nb[1].values[0] == 2).all() + + newb = fblock.copy() + nb = newb.delete(2) + assert len(nb) == 1 + tm.assert_numpy_array_equal( + nb[0].mgr_locs.as_array, np.array([0, 2], dtype=np.intp) + ) + assert (nb[0].values[1] == 1).all() + + newb = fblock.copy() + + with pytest.raises(IndexError, match=None): + newb.delete(3) + + def test_delete_datetimelike(self): + # dont use np.delete on values, as that will coerce from DTA/TDA to ndarray 
+ arr = np.arange(20, dtype="i8").reshape(5, 4).view("m8[ns]") + df = DataFrame(arr) + blk = df._mgr.blocks[0] + assert isinstance(blk.values, TimedeltaArray) + + nb = blk.delete(1) + assert len(nb) == 2 + assert isinstance(nb[0].values, TimedeltaArray) + assert isinstance(nb[1].values, TimedeltaArray) + + df = DataFrame(arr.view("M8[ns]")) + blk = df._mgr.blocks[0] + assert isinstance(blk.values, DatetimeArray) + + nb = blk.delete([1, 3]) + assert len(nb) == 2 + assert isinstance(nb[0].values, DatetimeArray) + assert isinstance(nb[1].values, DatetimeArray) + + def test_split(self): + # GH#37799 + values = np.random.default_rng(2).standard_normal((3, 4)) + blk = new_block(values, placement=BlockPlacement([3, 1, 6]), ndim=2) + result = blk._split() + + # check that we get views, not copies + values[:] = -9999 + assert (blk.values == -9999).all() + + assert len(result) == 3 + expected = [ + new_block(values[[0]], placement=BlockPlacement([3]), ndim=2), + new_block(values[[1]], placement=BlockPlacement([1]), ndim=2), + new_block(values[[2]], placement=BlockPlacement([6]), ndim=2), + ] + for res, exp in zip(result, expected): + assert_block_equal(res, exp) + + +class TestBlockManager: + def test_attrs(self): + mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2") + assert mgr.nblocks == 2 + assert len(mgr) == 6 + + def test_duplicate_ref_loc_failure(self): + tmp_mgr = create_mgr("a:bool; a: f8") + + axes, blocks = tmp_mgr.axes, tmp_mgr.blocks + + blocks[0].mgr_locs = BlockPlacement(np.array([0])) + blocks[1].mgr_locs = BlockPlacement(np.array([0])) + + # test trying to create block manager with overlapping ref locs + + msg = "Gaps in blk ref_locs" + + with pytest.raises(AssertionError, match=msg): + mgr = BlockManager(blocks, axes) + mgr._rebuild_blknos_and_blklocs() + + blocks[0].mgr_locs = BlockPlacement(np.array([0])) + blocks[1].mgr_locs = BlockPlacement(np.array([1])) + mgr = BlockManager(blocks, axes) + mgr.iget(1) + + def test_pickle(self, mgr): + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal( + DataFrame._from_mgr(mgr, axes=mgr.axes), + DataFrame._from_mgr(mgr2, axes=mgr2.axes), + ) + + # GH2431 + assert hasattr(mgr2, "_is_consolidated") + assert hasattr(mgr2, "_known_consolidated") + + # reset to False on load + assert not mgr2._is_consolidated + assert not mgr2._known_consolidated + + @pytest.mark.parametrize("mgr_string", ["a,a,a:f8", "a: f8; a: i8"]) + def test_non_unique_pickle(self, mgr_string): + mgr = create_mgr(mgr_string) + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal( + DataFrame._from_mgr(mgr, axes=mgr.axes), + DataFrame._from_mgr(mgr2, axes=mgr2.axes), + ) + + def test_categorical_block_pickle(self): + mgr = create_mgr("a: category") + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal( + DataFrame._from_mgr(mgr, axes=mgr.axes), + DataFrame._from_mgr(mgr2, axes=mgr2.axes), + ) + + smgr = create_single_mgr("category") + smgr2 = tm.round_trip_pickle(smgr) + tm.assert_series_equal( + Series()._constructor_from_mgr(smgr, axes=smgr.axes), + Series()._constructor_from_mgr(smgr2, axes=smgr2.axes), + ) + + def test_iget(self): + cols = Index(list("abc")) + values = np.random.default_rng(2).random((3, 3)) + block = new_block( + values=values.copy(), + placement=BlockPlacement(np.arange(3, dtype=np.intp)), + ndim=values.ndim, + ) + mgr = BlockManager(blocks=(block,), axes=[cols, Index(np.arange(3))]) + + tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0]) + tm.assert_almost_equal(mgr.iget(1).internal_values(), values[1]) + 
tm.assert_almost_equal(mgr.iget(2).internal_values(), values[2]) + + def test_set(self): + mgr = create_mgr("a,b,c: int", item_shape=(3,)) + + mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3)) + mgr.iset(1, np.array(["bar"] * 3)) + tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), np.array([0] * 3)) + tm.assert_numpy_array_equal( + mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_) + ) + tm.assert_numpy_array_equal(mgr.iget(2).internal_values(), np.array([2] * 3)) + tm.assert_numpy_array_equal( + mgr.iget(3).internal_values(), np.array(["foo"] * 3, dtype=np.object_) + ) + + def test_set_change_dtype(self, mgr): + mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool)) + + mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N)) + idx = mgr.items.get_loc("baz") + assert mgr.iget(idx).dtype == np.object_ + + mgr2 = mgr.consolidate() + mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N)) + idx = mgr2.items.get_loc("baz") + assert mgr2.iget(idx).dtype == np.object_ + + mgr2.insert( + len(mgr2.items), + "quux", + np.random.default_rng(2).standard_normal(N).astype(int), + ) + idx = mgr2.items.get_loc("quux") + assert mgr2.iget(idx).dtype == np.dtype(int) + + mgr2.iset( + mgr2.items.get_loc("quux"), np.random.default_rng(2).standard_normal(N) + ) + assert mgr2.iget(idx).dtype == np.float64 + + def test_copy(self, mgr): + cp = mgr.copy(deep=False) + for blk, cp_blk in zip(mgr.blocks, cp.blocks): + # view assertion + tm.assert_equal(cp_blk.values, blk.values) + if isinstance(blk.values, np.ndarray): + assert cp_blk.values.base is blk.values.base + else: + # DatetimeTZBlock has DatetimeIndex values + assert cp_blk.values._ndarray.base is blk.values._ndarray.base + + # copy(deep=True) consolidates, so the block-wise assertions will + # fail is mgr is not consolidated + mgr._consolidate_inplace() + cp = mgr.copy(deep=True) + for blk, cp_blk in zip(mgr.blocks, cp.blocks): + bvals = blk.values + cpvals = cp_blk.values + + tm.assert_equal(cpvals, bvals) + + if isinstance(cpvals, np.ndarray): + lbase = cpvals.base + rbase = bvals.base + else: + lbase = cpvals._ndarray.base + rbase = bvals._ndarray.base + + # copy assertion we either have a None for a base or in case of + # some blocks it is an array (e.g. 
datetimetz), but was copied + if isinstance(cpvals, DatetimeArray): + assert (lbase is None and rbase is None) or (lbase is not rbase) + elif not isinstance(cpvals, np.ndarray): + assert lbase is not rbase + else: + assert lbase is None and rbase is None + + def test_sparse(self): + mgr = create_mgr("a: sparse-1; b: sparse-2") + assert mgr.as_array().dtype == np.float64 + + def test_sparse_mixed(self): + mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8") + assert len(mgr.blocks) == 3 + assert isinstance(mgr, BlockManager) + + @pytest.mark.parametrize( + "mgr_string, dtype", + [("c: f4; d: f2", np.float32), ("c: f4; d: f2; e: f8", np.float64)], + ) + def test_as_array_float(self, mgr_string, dtype): + mgr = create_mgr(mgr_string) + assert mgr.as_array().dtype == dtype + + @pytest.mark.parametrize( + "mgr_string, dtype", + [ + ("a: bool-1; b: bool-2", np.bool_), + ("a: i8-1; b: i8-2; c: i4; d: i2; e: u1", np.int64), + ("c: i4; d: i2; e: u1", np.int32), + ], + ) + def test_as_array_int_bool(self, mgr_string, dtype): + mgr = create_mgr(mgr_string) + assert mgr.as_array().dtype == dtype + + def test_as_array_datetime(self): + mgr = create_mgr("h: datetime-1; g: datetime-2") + assert mgr.as_array().dtype == "M8[ns]" + + def test_as_array_datetime_tz(self): + mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]") + assert mgr.iget(0).dtype == "datetime64[ns, US/Eastern]" + assert mgr.iget(1).dtype == "datetime64[ns, CET]" + assert mgr.as_array().dtype == "object" + + @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"]) + def test_astype(self, t): + # coerce all + mgr = create_mgr("c: f4; d: f2; e: f8") + + t = np.dtype(t) + tmgr = mgr.astype(t) + assert tmgr.iget(0).dtype.type == t + assert tmgr.iget(1).dtype.type == t + assert tmgr.iget(2).dtype.type == t + + # mixed + mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8") + + t = np.dtype(t) + tmgr = mgr.astype(t, errors="ignore") + assert tmgr.iget(2).dtype.type == t + assert tmgr.iget(4).dtype.type == t + assert tmgr.iget(5).dtype.type == t + assert tmgr.iget(6).dtype.type == t + + assert tmgr.iget(0).dtype.type == np.object_ + assert tmgr.iget(1).dtype.type == np.object_ + if t != np.int64: + assert tmgr.iget(3).dtype.type == np.datetime64 + else: + assert tmgr.iget(3).dtype.type == t + + def test_convert(self, using_infer_string): + def _compare(old_mgr, new_mgr): + """compare the blocks, numeric compare ==, object don't""" + old_blocks = set(old_mgr.blocks) + new_blocks = set(new_mgr.blocks) + assert len(old_blocks) == len(new_blocks) + + # compare non-numeric + for b in old_blocks: + found = False + for nb in new_blocks: + if (b.values == nb.values).all(): + found = True + break + assert found + + for b in new_blocks: + found = False + for ob in old_blocks: + if (b.values == ob.values).all(): + found = True + break + assert found + + # noops + mgr = create_mgr("f: i8; g: f8") + new_mgr = mgr.convert(copy=True) + _compare(mgr, new_mgr) + + # convert + mgr = create_mgr("a,b,foo: object; f: i8; g: f8") + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, np.array(["foo."] * N, dtype=np.object_)) + new_mgr = mgr.convert(copy=True) + dtype = "string[pyarrow_numpy]" if using_infer_string else np.object_ + assert new_mgr.iget(0).dtype == dtype + assert new_mgr.iget(1).dtype == dtype + assert new_mgr.iget(2).dtype == dtype + assert new_mgr.iget(3).dtype == np.int64 + assert new_mgr.iget(4).dtype == np.float64 + + mgr = 
create_mgr( + "a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2" + ) + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, np.array(["foo."] * N, dtype=np.object_)) + new_mgr = mgr.convert(copy=True) + assert new_mgr.iget(0).dtype == dtype + assert new_mgr.iget(1).dtype == dtype + assert new_mgr.iget(2).dtype == dtype + assert new_mgr.iget(3).dtype == np.int32 + assert new_mgr.iget(4).dtype == np.bool_ + assert new_mgr.iget(5).dtype.type, np.datetime64 + assert new_mgr.iget(6).dtype == np.int64 + assert new_mgr.iget(7).dtype == np.float64 + assert new_mgr.iget(8).dtype == np.float16 + + def test_interleave(self): + # self + for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]: + mgr = create_mgr(f"a: {dtype}") + assert mgr.as_array().dtype == dtype + mgr = create_mgr(f"a: {dtype}; b: {dtype}") + assert mgr.as_array().dtype == dtype + + @pytest.mark.parametrize( + "mgr_string, dtype", + [ + ("a: category", "i8"), + ("a: category; b: category", "i8"), + ("a: category; b: category2", "object"), + ("a: category2", "object"), + ("a: category2; b: category2", "object"), + ("a: f8", "f8"), + ("a: f8; b: i8", "f8"), + ("a: f4; b: i8", "f8"), + ("a: f4; b: i8; d: object", "object"), + ("a: bool; b: i8", "object"), + ("a: complex", "complex"), + ("a: f8; b: category", "object"), + ("a: M8[ns]; b: category", "object"), + ("a: M8[ns]; b: bool", "object"), + ("a: M8[ns]; b: i8", "object"), + ("a: m8[ns]; b: bool", "object"), + ("a: m8[ns]; b: i8", "object"), + ("a: M8[ns]; b: m8[ns]", "object"), + ], + ) + def test_interleave_dtype(self, mgr_string, dtype): + # will be converted according the actual dtype of the underlying + mgr = create_mgr("a: category") + assert mgr.as_array().dtype == "i8" + mgr = create_mgr("a: category; b: category2") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: category2") + assert mgr.as_array().dtype == "object" + + # combinations + mgr = create_mgr("a: f8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f8; b: i8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f4; b: i8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f4; b: i8; d: object") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: bool; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: complex") + assert mgr.as_array().dtype == "complex" + mgr = create_mgr("a: f8; b: category") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: M8[ns]; b: category") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: bool") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: m8[ns]; b: bool") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: m8[ns]; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: m8[ns]") + assert mgr.as_array().dtype == "object" + + def test_consolidate_ordering_issues(self, mgr): + mgr.iset(mgr.items.get_loc("f"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("d"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("b"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("g"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("h"), np.random.default_rng(2).standard_normal(N)) + + # we have datetime/tz blocks in mgr + cons = 
mgr.consolidate() + assert cons.nblocks == 4 + cons = mgr.consolidate().get_numeric_data() + assert cons.nblocks == 1 + assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement) + tm.assert_numpy_array_equal( + cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp) + ) + + def test_reindex_items(self): + # mgr is not consolidated, f8 & f8-2 blocks + mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2") + + reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0) + # reindex_axis does not consolidate_inplace, as that risks failing to + # invalidate _item_cache + assert not reindexed.is_consolidated() + + tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"])) + tm.assert_almost_equal( + mgr.iget(6).internal_values(), reindexed.iget(0).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(2).internal_values(), reindexed.iget(1).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(0).internal_values(), reindexed.iget(2).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(3).internal_values(), reindexed.iget(3).internal_values() + ) + + def test_get_numeric_data(self, using_copy_on_write): + mgr = create_mgr( + "int: int; float: float; complex: complex;" + "str: object; bool: bool; obj: object; dt: datetime", + item_shape=(3,), + ) + mgr.iset(5, np.array([1, 2, 3], dtype=np.object_)) + + numeric = mgr.get_numeric_data() + tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"])) + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + numeric.iget(numeric.items.get_loc("float")).internal_values(), + ) + + # Check sharing + numeric.iset( + numeric.items.get_loc("float"), + np.array([100.0, 200.0, 300.0]), + inplace=True, + ) + if using_copy_on_write: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([1.0, 1.0, 1.0]), + ) + else: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([100.0, 200.0, 300.0]), + ) + + def test_get_bool_data(self, using_copy_on_write): + mgr = create_mgr( + "int: int; float: float; complex: complex;" + "str: object; bool: bool; obj: object; dt: datetime", + item_shape=(3,), + ) + mgr.iset(6, np.array([True, False, True], dtype=np.object_)) + + bools = mgr.get_bool_data() + tm.assert_index_equal(bools.items, Index(["bool"])) + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + bools.iget(bools.items.get_loc("bool")).internal_values(), + ) + + bools.iset(0, np.array([True, False, True]), inplace=True) + if using_copy_on_write: + tm.assert_numpy_array_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, True, True]), + ) + else: + tm.assert_numpy_array_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, False, True]), + ) + + def test_unicode_repr_doesnt_raise(self): + repr(create_mgr("b,\u05d0: object")) + + @pytest.mark.parametrize( + "mgr_string", ["a,b,c: i8-1; d,e,f: i8-2", "a,a,a: i8-1; b,b,b: i8-2"] + ) + def test_equals(self, mgr_string): + # unique items + bm1 = create_mgr(mgr_string) + bm2 = BlockManager(bm1.blocks[::-1], bm1.axes) + assert bm1.equals(bm2) + + @pytest.mark.parametrize( + "mgr_string", + [ + "a:i8;b:f8", # basic case + "a:i8;b:f8;c:c8;d:b", # many types + "a:i8;e:dt;f:td;g:string", # more types + "a:i8;b:category;c:category2", # categories + "c:sparse;d:sparse_na;b:f8", # sparse + ], + ) + def test_equals_block_order_different_dtypes(self, 
mgr_string): + # GH 9330 + bm = create_mgr(mgr_string) + block_perms = itertools.permutations(bm.blocks) + for bm_perm in block_perms: + bm_this = BlockManager(tuple(bm_perm), bm.axes) + assert bm.equals(bm_this) + assert bm_this.equals(bm) + + def test_single_mgr_ctor(self): + mgr = create_single_mgr("f8", num_rows=5) + assert mgr.external_values().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0] + + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2") + + msg = ( + 'For argument "inplace" expected type bool, ' + f"received type {type(value).__name__}." + ) + with pytest.raises(ValueError, match=msg): + bm1.replace_list([1], [2], inplace=value) + + def test_iset_split_block(self): + bm = create_mgr("a,b,c: i8; d: f8") + bm._iset_split_block(0, np.array([0])) + tm.assert_numpy_array_equal( + bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32") + ) + # First indexer currently does not have a block associated with it in case + tm.assert_numpy_array_equal( + bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32") + ) + assert len(bm.blocks) == 2 + + def test_iset_split_block_values(self): + bm = create_mgr("a,b,c: i8; d: f8") + bm._iset_split_block(0, np.array([0]), np.array([list(range(10))])) + tm.assert_numpy_array_equal( + bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32") + ) + # First indexer currently does not have a block associated with it in case + tm.assert_numpy_array_equal( + bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32") + ) + assert len(bm.blocks) == 3 + + +def _as_array(mgr): + if mgr.ndim == 1: + return mgr.external_values() + return mgr.as_array().T + + +class TestIndexing: + # Nosetests-style data-driven tests. + # + # This test applies different indexing routines to block managers and + # compares the outcome to the result of same operations on np.ndarray. + # + # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests + # and are disabled. 
+ + MANAGERS = [ + create_single_mgr("f8", N), + create_single_mgr("i8", N), + # 2-dim + create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)), + create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)), + create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)), + create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)), + ] + + @pytest.mark.parametrize("mgr", MANAGERS) + def test_get_slice(self, mgr): + def assert_slice_ok(mgr, axis, slobj): + mat = _as_array(mgr) + + # we maybe using an ndarray to test slicing and + # might not be the full length of the axis + if isinstance(slobj, np.ndarray): + ax = mgr.axes[axis] + if len(ax) and len(slobj) and len(slobj) != len(ax): + slobj = np.concatenate( + [slobj, np.zeros(len(ax) - len(slobj), dtype=bool)] + ) + + if isinstance(slobj, slice): + sliced = mgr.get_slice(slobj, axis=axis) + elif ( + mgr.ndim == 1 + and axis == 0 + and isinstance(slobj, np.ndarray) + and slobj.dtype == bool + ): + sliced = mgr.get_rows_with_mask(slobj) + else: + # BlockManager doesn't support non-slice, SingleBlockManager + # doesn't support axis > 0 + raise TypeError(slobj) + + mat_slobj = (slice(None),) * axis + (slobj,) + tm.assert_numpy_array_equal( + mat[mat_slobj], _as_array(sliced), check_dtype=False + ) + tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis]) + + assert mgr.ndim <= 2, mgr.ndim + for ax in range(mgr.ndim): + # slice + assert_slice_ok(mgr, ax, slice(None)) + assert_slice_ok(mgr, ax, slice(3)) + assert_slice_ok(mgr, ax, slice(100)) + assert_slice_ok(mgr, ax, slice(1, 4)) + assert_slice_ok(mgr, ax, slice(3, 0, -2)) + + if mgr.ndim < 2: + # 2D only support slice objects + + # boolean mask + assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_)) + assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_)) + + if mgr.shape[ax] >= 3: + assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0) + assert_slice_ok( + mgr, ax, np.array([True, True, False], dtype=np.bool_) + ) + + @pytest.mark.parametrize("mgr", MANAGERS) + def test_take(self, mgr): + def assert_take_ok(mgr, axis, indexer): + mat = _as_array(mgr) + taken = mgr.take(indexer, axis) + tm.assert_numpy_array_equal( + np.take(mat, indexer, axis), _as_array(taken), check_dtype=False + ) + tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis]) + + for ax in range(mgr.ndim): + # take/fancy indexer + assert_take_ok(mgr, ax, indexer=np.array([], dtype=np.intp)) + assert_take_ok(mgr, ax, indexer=np.array([0, 0, 0], dtype=np.intp)) + assert_take_ok( + mgr, ax, indexer=np.array(list(range(mgr.shape[ax])), dtype=np.intp) + ) + + if mgr.shape[ax] >= 3: + assert_take_ok(mgr, ax, indexer=np.array([0, 1, 2], dtype=np.intp)) + assert_take_ok(mgr, ax, indexer=np.array([-1, -2, -3], dtype=np.intp)) + + @pytest.mark.parametrize("mgr", MANAGERS) + @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0]) + def test_reindex_axis(self, fill_value, mgr): + def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value): + mat = _as_array(mgr) + indexer = mgr.axes[axis].get_indexer_for(new_labels) + + reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value) + tm.assert_numpy_array_equal( + algos.take_nd(mat, indexer, axis, fill_value=fill_value), + _as_array(reindexed), + check_dtype=False, + ) + tm.assert_index_equal(reindexed.axes[axis], new_labels) + + for ax in range(mgr.ndim): + assert_reindex_axis_is_ok(mgr, ax, Index([]), fill_value) + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value) + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][[0, 0, 0]], 
fill_value) + assert_reindex_axis_is_ok(mgr, ax, Index(["foo", "bar", "baz"]), fill_value) + assert_reindex_axis_is_ok( + mgr, ax, Index(["foo", mgr.axes[ax][0], "baz"]), fill_value + ) + + if mgr.shape[ax] >= 3: + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][:-3], fill_value) + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][-3::-1], fill_value) + assert_reindex_axis_is_ok( + mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value + ) + + @pytest.mark.parametrize("mgr", MANAGERS) + @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0]) + def test_reindex_indexer(self, fill_value, mgr): + def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): + mat = _as_array(mgr) + reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value) + reindexed = mgr.reindex_indexer( + new_labels, indexer, axis, fill_value=fill_value + ) + tm.assert_numpy_array_equal( + reindexed_mat, _as_array(reindexed), check_dtype=False + ) + tm.assert_index_equal(reindexed.axes[axis], new_labels) + + for ax in range(mgr.ndim): + assert_reindex_indexer_is_ok( + mgr, ax, Index([]), np.array([], dtype=np.intp), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo"] * mgr.shape[ax]), + np.arange(mgr.shape[ax]), + fill_value, + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, Index(["foo", "bar", "baz"]), np.array([0, 0, 0]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, Index(["foo", "bar", "baz"]), np.array([-1, 0, -1]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo", mgr.axes[ax][0], "baz"]), + np.array([-1, -1, -1]), + fill_value, + ) + + if mgr.shape[ax] >= 3: + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo", "bar", "baz"]), + np.array([0, 1, 2]), + fill_value, + ) + + +class TestBlockPlacement: + @pytest.mark.parametrize( + "slc, expected", + [ + (slice(0, 4), 4), + (slice(0, 4, 2), 2), + (slice(0, 3, 2), 2), + (slice(0, 1, 2), 1), + (slice(1, 0, -1), 1), + ], + ) + def test_slice_len(self, slc, expected): + assert len(BlockPlacement(slc)) == expected + + @pytest.mark.parametrize("slc", [slice(1, 1, 0), slice(1, 2, 0)]) + def test_zero_step_raises(self, slc): + msg = "slice step cannot be zero" + with pytest.raises(ValueError, match=msg): + BlockPlacement(slc) + + def test_slice_canonize_negative_stop(self): + # GH#37524 negative stop is OK with negative step and positive start + slc = slice(3, -1, -2) + + bp = BlockPlacement(slc) + assert bp.indexer == slice(3, None, -2) + + @pytest.mark.parametrize( + "slc", + [ + slice(None, None), + slice(10, None), + slice(None, None, -1), + slice(None, 10, -1), + # These are "unbounded" because negative index will + # change depending on container shape. 
+ slice(-1, None), + slice(None, -1), + slice(-1, -1), + slice(-1, None, -1), + slice(None, -1, -1), + slice(-1, -1, -1), + ], + ) + def test_unbounded_slice_raises(self, slc): + msg = "unbounded slice" + with pytest.raises(ValueError, match=msg): + BlockPlacement(slc) + + @pytest.mark.parametrize( + "slc", + [ + slice(0, 0), + slice(100, 0), + slice(100, 100), + slice(100, 100, -1), + slice(0, 100, -1), + ], + ) + def test_not_slice_like_slices(self, slc): + assert not BlockPlacement(slc).is_slice_like + + @pytest.mark.parametrize( + "arr, slc", + [ + ([0], slice(0, 1, 1)), + ([100], slice(100, 101, 1)), + ([0, 1, 2], slice(0, 3, 1)), + ([0, 5, 10], slice(0, 15, 5)), + ([0, 100], slice(0, 200, 100)), + ([2, 1], slice(2, 0, -1)), + ], + ) + def test_array_to_slice_conversion(self, arr, slc): + assert BlockPlacement(arr).as_slice == slc + + @pytest.mark.parametrize( + "arr", + [ + [], + [-1], + [-1, -2, -3], + [-10], + [-1], + [-1, 0, 1, 2], + [-2, 0, 2, 4], + [1, 0, -1], + [1, 1, 1], + ], + ) + def test_not_slice_like_arrays(self, arr): + assert not BlockPlacement(arr).is_slice_like + + @pytest.mark.parametrize( + "slc, expected", + [(slice(0, 3), [0, 1, 2]), (slice(0, 0), []), (slice(3, 0), [])], + ) + def test_slice_iter(self, slc, expected): + assert list(BlockPlacement(slc)) == expected + + @pytest.mark.parametrize( + "slc, arr", + [ + (slice(0, 3), [0, 1, 2]), + (slice(0, 0), []), + (slice(3, 0), []), + (slice(3, 0, -1), [3, 2, 1]), + ], + ) + def test_slice_to_array_conversion(self, slc, arr): + tm.assert_numpy_array_equal( + BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.intp) + ) + + def test_blockplacement_add(self): + bpl = BlockPlacement(slice(0, 5)) + assert bpl.add(1).as_slice == slice(1, 6, 1) + assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2) + assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5] + + @pytest.mark.parametrize( + "val, inc, expected", + [ + (slice(0, 0), 0, []), + (slice(1, 4), 0, [1, 2, 3]), + (slice(3, 0, -1), 0, [3, 2, 1]), + ([1, 2, 4], 0, [1, 2, 4]), + (slice(0, 0), 10, []), + (slice(1, 4), 10, [11, 12, 13]), + (slice(3, 0, -1), 10, [13, 12, 11]), + ([1, 2, 4], 10, [11, 12, 14]), + (slice(0, 0), -1, []), + (slice(1, 4), -1, [0, 1, 2]), + ([1, 2, 4], -1, [0, 1, 3]), + ], + ) + def test_blockplacement_add_int(self, val, inc, expected): + assert list(BlockPlacement(val).add(inc)) == expected + + @pytest.mark.parametrize("val", [slice(1, 4), [1, 2, 4]]) + def test_blockplacement_add_int_raises(self, val): + msg = "iadd causes length change" + with pytest.raises(ValueError, match=msg): + BlockPlacement(val).add(-10) + + +class TestCanHoldElement: + @pytest.fixture( + params=[ + lambda x: x, + lambda x: x.to_series(), + lambda x: x._data, + lambda x: list(x), + lambda x: x.astype(object), + lambda x: np.asarray(x), + lambda x: x[0], + lambda x: x[:0], + ] + ) + def element(self, request): + """ + Functions that take an Index and return an element that should have + blk._can_hold_element(element) for a Block with this index's dtype. 
+ """ + return request.param + + def test_datetime_block_can_hold_element(self): + block = create_block("datetime", [0]) + + assert block._can_hold_element([]) + + # We will check that block._can_hold_element iff arr.__setitem__ works + arr = pd.array(block.values.ravel()) + + # coerce None + assert block._can_hold_element(None) + arr[0] = None + assert arr[0] is pd.NaT + + # coerce different types of datetime objects + vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] + for val in vals: + assert block._can_hold_element(val) + arr[0] = val + + val = date(2010, 10, 10) + assert not block._can_hold_element(val) + + msg = ( + "value should be a 'Timestamp', 'NaT', " + "or array of those. Got 'date' instead." + ) + with pytest.raises(TypeError, match=msg): + arr[0] = val + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_interval_can_hold_element_emptylist(self, dtype, element): + arr = np.array([1, 3, 4], dtype=dtype) + ii = IntervalIndex.from_breaks(arr) + blk = new_block(ii._data, BlockPlacement([1]), ndim=2) + + assert blk._can_hold_element([]) + # TODO: check this holds for all blocks + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_interval_can_hold_element(self, dtype, element): + arr = np.array([1, 3, 4, 9], dtype=dtype) + ii = IntervalIndex.from_breaks(arr) + blk = new_block(ii._data, BlockPlacement([1]), ndim=2) + + elem = element(ii) + self.check_series_setitem(elem, ii, True) + assert blk._can_hold_element(elem) + + # Careful: to get the expected Series-inplace behavior we need + # `elem` to not have the same length as `arr` + ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither") + elem = element(ii2) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)]) + elem = element(ii3) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)]) + elem = element(ii4) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + def test_period_can_hold_element_emptylist(self): + pi = period_range("2016", periods=3, freq="Y") + blk = new_block(pi._data.reshape(1, 3), BlockPlacement([1]), ndim=2) + + assert blk._can_hold_element([]) + + def test_period_can_hold_element(self, element): + pi = period_range("2016", periods=3, freq="Y") + + elem = element(pi) + self.check_series_setitem(elem, pi, True) + + # Careful: to get the expected Series-inplace behavior we need + # `elem` to not have the same length as `arr` + pi2 = pi.asfreq("D")[:-1] + elem = element(pi2) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, pi, False) + + dti = pi.to_timestamp("s")[:-1] + elem = element(dti) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, pi, False) + + def check_can_hold_element(self, obj, elem, inplace: bool): + blk = obj._mgr.blocks[0] + if inplace: + assert blk._can_hold_element(elem) + else: + assert not blk._can_hold_element(elem) + + def check_series_setitem(self, elem, index: Index, inplace: bool): + arr = index._data.copy() + ser = Series(arr, copy=False) + + self.check_can_hold_element(ser, elem, inplace) + + if is_scalar(elem): + ser[0] = elem + else: + 
ser[: len(elem)] = elem + + if inplace: + assert ser.array is arr # i.e. setting was done inplace + else: + assert ser.dtype == object + + +class TestShouldStore: + def test_should_store_categorical(self): + cat = Categorical(["A", "B", "C"]) + df = DataFrame(cat) + blk = df._mgr.blocks[0] + + # matching dtype + assert blk.should_store(cat) + assert blk.should_store(cat[:-1]) + + # different dtype + assert not blk.should_store(cat.as_ordered()) + + # ndarray instead of Categorical + assert not blk.should_store(np.asarray(cat)) + + +def test_validate_ndim(): + values = np.array([1.0, 2.0]) + placement = BlockPlacement(slice(2)) + msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]" + + with pytest.raises(ValueError, match=msg): + make_block(values, placement, ndim=2) + + +def test_block_shape(): + idx = Index([0, 1, 2, 3, 4]) + a = Series([1, 2, 3]).reindex(idx) + b = Series(Categorical([1, 2, 3])).reindex(idx) + + assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer + + +def test_make_block_no_pandas_array(block_maker): + # https://github.com/pandas-dev/pandas/pull/24866 + arr = pd.arrays.NumpyExtensionArray(np.array([1, 2])) + + # NumpyExtensionArray, no dtype + result = block_maker(arr, BlockPlacement(slice(len(arr))), ndim=arr.ndim) + assert result.dtype.kind in ["i", "u"] + + if block_maker is make_block: + # new_block requires caller to unwrap NumpyExtensionArray + assert result.is_extension is False + + # NumpyExtensionArray, NumpyEADtype + result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim) + assert result.dtype.kind in ["i", "u"] + assert result.is_extension is False + + # new_block no longer taked dtype keyword + # ndarray, NumpyEADtype + result = block_maker( + arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim + ) + assert result.dtype.kind in ["i", "u"] + assert result.is_extension is False diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py new file mode 100644 index 0000000000000000000000000000000000000000..f40362c299717be5f2e8665e4547276c2af05fb0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/internals/test_managers.py @@ -0,0 +1,103 @@ +""" +Testing interaction between the different managers (BlockManager, ArrayManager) +""" +import os +import subprocess +import sys + +import pytest + +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +import pandas._testing as tm +from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, + SingleBlockManager, +) + + +def test_dataframe_creation(): + msg = "data_manager option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.data_manager", "block"): + df_block = pd.DataFrame( + {"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]} + ) + assert isinstance(df_block._mgr, BlockManager) + + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.data_manager", "array"): + df_array = pd.DataFrame( + {"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]} + ) + assert isinstance(df_array._mgr, ArrayManager) + + # also ensure both are seen as equal + tm.assert_frame_equal(df_block, df_array) + + # conversion from one manager to the other + result = df_block._as_manager("block") + assert isinstance(result._mgr, BlockManager) + result = df_block._as_manager("array") + assert 
isinstance(result._mgr, ArrayManager) + tm.assert_frame_equal(result, df_block) + assert all( + array_equivalent(left, right) + for left, right in zip(result._mgr.arrays, df_array._mgr.arrays) + ) + + result = df_array._as_manager("array") + assert isinstance(result._mgr, ArrayManager) + result = df_array._as_manager("block") + assert isinstance(result._mgr, BlockManager) + tm.assert_frame_equal(result, df_array) + assert len(result._mgr.blocks) == 2 + + +def test_series_creation(): + msg = "data_manager option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.data_manager", "block"): + s_block = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"]) + assert isinstance(s_block._mgr, SingleBlockManager) + + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.data_manager", "array"): + s_array = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"]) + assert isinstance(s_array._mgr, SingleArrayManager) + + # also ensure both are seen as equal + tm.assert_series_equal(s_block, s_array) + + # conversion from one manager to the other + result = s_block._as_manager("block") + assert isinstance(result._mgr, SingleBlockManager) + result = s_block._as_manager("array") + assert isinstance(result._mgr, SingleArrayManager) + tm.assert_series_equal(result, s_block) + + result = s_array._as_manager("array") + assert isinstance(result._mgr, SingleArrayManager) + result = s_array._as_manager("block") + assert isinstance(result._mgr, SingleBlockManager) + tm.assert_series_equal(result, s_array) + + +@pytest.mark.single_cpu +@pytest.mark.parametrize("manager", ["block", "array"]) +def test_array_manager_depr_env_var(manager): + # GH#55043 + test_env = os.environ.copy() + test_env["PANDAS_DATA_MANAGER"] = manager + response = subprocess.run( + [sys.executable, "-c", "import pandas"], + capture_output=True, + env=test_env, + check=True, + ) + msg = "FutureWarning: The env variable PANDAS_DATA_MANAGER is set" + stderr_msg = response.stderr.decode("utf-8") + assert msg in stderr_msg, stderr_msg diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81c138a237f366d64adb80f2331774ebc186c52b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..775b0ec238a5b4bcad761136bfad8f7251f49746 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e300e43ede0988405f83b24d3ef9763b1a2f2bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..715106f171d07f742fe1cfb7e0635471f92f4617 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py new file mode 100644 index 0000000000000000000000000000000000000000..73c4d8061c25789c1ec2a5e4d2c2851d4066a90e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py @@ -0,0 +1,828 @@ +import pytest + +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG +from pandas.errors import OutOfBoundsDatetime + +from pandas import ( + Period, + Timestamp, + offsets, +) +import pandas._testing as tm + +bday_msg = "Period with BDay freq is deprecated" + + +class TestFreqConversion: + """Test frequency conversion of date objects""" + + @pytest.mark.filterwarnings("ignore:Period with BDay:FutureWarning") + @pytest.mark.parametrize("freq", ["Y", "Q", "M", "W", "B", "D"]) + def test_asfreq_near_zero(self, freq): + # GH#19643, GH#19650 + per = Period("0001-01-01", freq=freq) + tup1 = (per.year, per.hour, per.day) + + prev = per - 1 + assert prev.ordinal == per.ordinal - 1 + tup2 = (prev.year, prev.month, prev.day) + assert tup2 < tup1 + + def test_asfreq_near_zero_weekly(self): + # GH#19834 + per1 = Period("0001-01-01", "D") + 6 + per2 = Period("0001-01-01", "D") - 6 + week1 = per1.asfreq("W") + week2 = per2.asfreq("W") + assert week1 != week2 + assert week1.asfreq("D", "E") >= per1 + assert week2.asfreq("D", "S") <= per2 + + def test_to_timestamp_out_of_bounds(self): + # GH#19643, used to incorrectly give Timestamp in 1754 + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + per = Period("0001-01-01", freq="B") + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + per.to_timestamp() + + def test_asfreq_corner(self): + val = Period(freq="Y", year=2007) + result1 = val.asfreq("5min") + result2 = val.asfreq("min") + expected = Period("2007-12-31 23:59", freq="min") + assert result1.ordinal == expected.ordinal + assert result1.freqstr == "5min" + assert result2.ordinal == expected.ordinal + assert result2.freqstr == "min" + + def test_conv_annual(self): + # frequency conversion tests: from Annual Frequency + + ival_A = Period(freq="Y", year=2007) + + ival_AJAN = Period(freq="Y-JAN", year=2007) + ival_AJUN = Period(freq="Y-JUN", year=2007) + ival_ANOV = Period(freq="Y-NOV", year=2007) + + ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1) + ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4) + ival_A_to_M_start = Period(freq="M", year=2007, month=1) + ival_A_to_M_end = Period(freq="M", year=2007, month=12) + ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31) + ival_A_to_D_start = 
Period(freq="D", year=2007, month=1, day=1) + ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31) + ival_A_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_A_to_H_end = Period(freq="h", year=2007, month=12, day=31, hour=23) + ival_A_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_A_to_T_end = Period( + freq="Min", year=2007, month=12, day=31, hour=23, minute=59 + ) + ival_A_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_A_to_S_end = Period( + freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59 + ) + + ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31) + ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1) + ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30) + ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1) + ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30) + ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1) + + assert ival_A.asfreq("Q", "s") == ival_A_to_Q_start + assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end + assert ival_A.asfreq("M", "s") == ival_A_to_M_start + assert ival_A.asfreq("M", "E") == ival_A_to_M_end + assert ival_A.asfreq("W", "s") == ival_A_to_W_start + assert ival_A.asfreq("W", "E") == ival_A_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_A.asfreq("B", "s") == ival_A_to_B_start + assert ival_A.asfreq("B", "E") == ival_A_to_B_end + assert ival_A.asfreq("D", "s") == ival_A_to_D_start + assert ival_A.asfreq("D", "E") == ival_A_to_D_end + msg = "'H' is deprecated and will be removed in a future version." + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("H", "s") == ival_A_to_H_start + assert ival_A.asfreq("H", "E") == ival_A_to_H_end + assert ival_A.asfreq("min", "s") == ival_A_to_T_start + assert ival_A.asfreq("min", "E") == ival_A_to_T_end + msg = "'T' is deprecated and will be removed in a future version." + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("T", "s") == ival_A_to_T_start + assert ival_A.asfreq("T", "E") == ival_A_to_T_end + msg = "'S' is deprecated and will be removed in a future version." 
+ with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("S", "S") == ival_A_to_S_start + assert ival_A.asfreq("S", "E") == ival_A_to_S_end + + assert ival_AJAN.asfreq("D", "s") == ival_AJAN_to_D_start + assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end + + assert ival_AJUN.asfreq("D", "s") == ival_AJUN_to_D_start + assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end + + assert ival_ANOV.asfreq("D", "s") == ival_ANOV_to_D_start + assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end + + assert ival_A.asfreq("Y") == ival_A + + def test_conv_quarterly(self): + # frequency conversion tests: from Quarterly Frequency + + ival_Q = Period(freq="Q", year=2007, quarter=1) + ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4) + + ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1) + ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1) + + ival_Q_to_A = Period(freq="Y", year=2007) + ival_Q_to_M_start = Period(freq="M", year=2007, month=1) + ival_Q_to_M_end = Period(freq="M", year=2007, month=3) + ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30) + ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31) + ival_Q_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_Q_to_H_end = Period(freq="h", year=2007, month=3, day=31, hour=23) + ival_Q_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_Q_to_T_end = Period( + freq="Min", year=2007, month=3, day=31, hour=23, minute=59 + ) + ival_Q_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_Q_to_S_end = Period( + freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59 + ) + + ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1) + ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30) + + ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1) + ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30) + + assert ival_Q.asfreq("Y") == ival_Q_to_A + assert ival_Q_end_of_year.asfreq("Y") == ival_Q_to_A + + assert ival_Q.asfreq("M", "s") == ival_Q_to_M_start + assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end + assert ival_Q.asfreq("W", "s") == ival_Q_to_W_start + assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_Q.asfreq("B", "s") == ival_Q_to_B_start + assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end + assert ival_Q.asfreq("D", "s") == ival_Q_to_D_start + assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end + assert ival_Q.asfreq("h", "s") == ival_Q_to_H_start + assert ival_Q.asfreq("h", "E") == ival_Q_to_H_end + assert ival_Q.asfreq("Min", "s") == ival_Q_to_T_start + assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end + assert ival_Q.asfreq("s", "s") == ival_Q_to_S_start + assert ival_Q.asfreq("s", "E") == ival_Q_to_S_end + + assert ival_QEJAN.asfreq("D", "s") == ival_QEJAN_to_D_start + assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end + assert ival_QEJUN.asfreq("D", "s") == ival_QEJUN_to_D_start + assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end + + assert ival_Q.asfreq("Q") == ival_Q + + def 
test_conv_monthly(self): + # frequency conversion tests: from Monthly Frequency + + ival_M = Period(freq="M", year=2007, month=1) + ival_M_end_of_year = Period(freq="M", year=2007, month=12) + ival_M_end_of_quarter = Period(freq="M", year=2007, month=3) + ival_M_to_A = Period(freq="Y", year=2007) + ival_M_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31) + ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31) + ival_M_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_M_to_H_end = Period(freq="h", year=2007, month=1, day=31, hour=23) + ival_M_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_M_to_T_end = Period( + freq="Min", year=2007, month=1, day=31, hour=23, minute=59 + ) + ival_M_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_M_to_S_end = Period( + freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59 + ) + + assert ival_M.asfreq("Y") == ival_M_to_A + assert ival_M_end_of_year.asfreq("Y") == ival_M_to_A + assert ival_M.asfreq("Q") == ival_M_to_Q + assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q + + assert ival_M.asfreq("W", "s") == ival_M_to_W_start + assert ival_M.asfreq("W", "E") == ival_M_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_M.asfreq("B", "s") == ival_M_to_B_start + assert ival_M.asfreq("B", "E") == ival_M_to_B_end + assert ival_M.asfreq("D", "s") == ival_M_to_D_start + assert ival_M.asfreq("D", "E") == ival_M_to_D_end + assert ival_M.asfreq("h", "s") == ival_M_to_H_start + assert ival_M.asfreq("h", "E") == ival_M_to_H_end + assert ival_M.asfreq("Min", "s") == ival_M_to_T_start + assert ival_M.asfreq("Min", "E") == ival_M_to_T_end + assert ival_M.asfreq("s", "s") == ival_M_to_S_start + assert ival_M.asfreq("s", "E") == ival_M_to_S_end + + assert ival_M.asfreq("M") == ival_M + + def test_conv_weekly(self): + # frequency conversion tests: from Weekly Frequency + ival_W = Period(freq="W", year=2007, month=1, day=1) + + ival_WSUN = Period(freq="W", year=2007, month=1, day=7) + ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6) + ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5) + ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4) + ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3) + ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2) + ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1) + + ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7) + ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31) + ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6) + ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30) + ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5) + ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29) + ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4) + ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28) + ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, 
day=3) + ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27) + ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2) + ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26) + ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1) + + ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31) + ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31) + ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31) + ival_W_to_A = Period(freq="Y", year=2007) + ival_W_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_W_to_M = Period(freq="M", year=2007, month=1) + + if Period(freq="D", year=2007, month=12, day=31).weekday == 6: + ival_W_to_A_end_of_year = Period(freq="Y", year=2007) + else: + ival_W_to_A_end_of_year = Period(freq="Y", year=2008) + + if Period(freq="D", year=2007, month=3, day=31).weekday == 6: + ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1) + else: + ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2) + + if Period(freq="D", year=2007, month=1, day=31).weekday == 6: + ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1) + else: + ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2) + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5) + ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7) + ival_W_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_W_to_H_end = Period(freq="h", year=2007, month=1, day=7, hour=23) + ival_W_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_W_to_T_end = Period( + freq="Min", year=2007, month=1, day=7, hour=23, minute=59 + ) + ival_W_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_W_to_S_end = Period( + freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59 + ) + + assert ival_W.asfreq("Y") == ival_W_to_A + assert ival_W_end_of_year.asfreq("Y") == ival_W_to_A_end_of_year + + assert ival_W.asfreq("Q") == ival_W_to_Q + assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter + + assert ival_W.asfreq("M") == ival_W_to_M + assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_W.asfreq("B", "s") == ival_W_to_B_start + assert ival_W.asfreq("B", "E") == ival_W_to_B_end + + assert ival_W.asfreq("D", "s") == ival_W_to_D_start + assert ival_W.asfreq("D", "E") == ival_W_to_D_end + + assert ival_WSUN.asfreq("D", "s") == ival_WSUN_to_D_start + assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end + assert ival_WSAT.asfreq("D", "s") == ival_WSAT_to_D_start + assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end + assert ival_WFRI.asfreq("D", "s") == ival_WFRI_to_D_start + assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end + assert ival_WTHU.asfreq("D", "s") == ival_WTHU_to_D_start + assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end + assert ival_WWED.asfreq("D", "s") == ival_WWED_to_D_start + assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end + assert ival_WTUE.asfreq("D", "s") == ival_WTUE_to_D_start + assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end + assert ival_WMON.asfreq("D", "s") == ival_WMON_to_D_start + assert ival_WMON.asfreq("D", "E") == 
ival_WMON_to_D_end + + assert ival_W.asfreq("h", "s") == ival_W_to_H_start + assert ival_W.asfreq("h", "E") == ival_W_to_H_end + assert ival_W.asfreq("Min", "s") == ival_W_to_T_start + assert ival_W.asfreq("Min", "E") == ival_W_to_T_end + assert ival_W.asfreq("s", "s") == ival_W_to_S_start + assert ival_W.asfreq("s", "E") == ival_W_to_S_end + + assert ival_W.asfreq("W") == ival_W + + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + ival_W.asfreq("WK") + + def test_conv_weekly_legacy(self): + # frequency conversion tests: from Weekly Frequency + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + Period(freq="WK", year=2007, month=1, day=1) + + with pytest.raises(ValueError, match=msg): + Period(freq="WK-SAT", year=2007, month=1, day=6) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-FRI", year=2007, month=1, day=5) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-THU", year=2007, month=1, day=4) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-WED", year=2007, month=1, day=3) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-TUE", year=2007, month=1, day=2) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-MON", year=2007, month=1, day=1) + + def test_conv_business(self): + # frequency conversion tests: from Business Frequency" + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_B = Period(freq="B", year=2007, month=1, day=1) + ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31) + ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30) + ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31) + ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5) + + ival_B_to_A = Period(freq="Y", year=2007) + ival_B_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_B_to_M = Period(freq="M", year=2007, month=1) + ival_B_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_B_to_D = Period(freq="D", year=2007, month=1, day=1) + ival_B_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_B_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_B_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_B_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_B_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_B_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + + assert ival_B.asfreq("Y") == ival_B_to_A + assert ival_B_end_of_year.asfreq("Y") == ival_B_to_A + assert ival_B.asfreq("Q") == ival_B_to_Q + assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q + assert ival_B.asfreq("M") == ival_B_to_M + assert ival_B_end_of_month.asfreq("M") == ival_B_to_M + assert ival_B.asfreq("W") == ival_B_to_W + assert ival_B_end_of_week.asfreq("W") == ival_B_to_W + + assert ival_B.asfreq("D") == ival_B_to_D + + assert ival_B.asfreq("h", "s") == ival_B_to_H_start + assert ival_B.asfreq("h", "E") == ival_B_to_H_end + assert ival_B.asfreq("Min", "s") == ival_B_to_T_start + assert ival_B.asfreq("Min", "E") == ival_B_to_T_end + assert ival_B.asfreq("s", "s") == ival_B_to_S_start + assert ival_B.asfreq("s", "E") == ival_B_to_S_end + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_B.asfreq("B") == ival_B + + def test_conv_daily(self): + # frequency conversion tests: from Business Frequency" + + ival_D = 
Period(freq="D", year=2007, month=1, day=1) + ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31) + ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31) + ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31) + ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7) + + ival_D_friday = Period(freq="D", year=2007, month=1, day=5) + ival_D_saturday = Period(freq="D", year=2007, month=1, day=6) + ival_D_sunday = Period(freq="D", year=2007, month=1, day=7) + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_B_friday = Period(freq="B", year=2007, month=1, day=5) + ival_B_monday = Period(freq="B", year=2007, month=1, day=8) + + ival_D_to_A = Period(freq="Y", year=2007) + + ival_Deoq_to_AJAN = Period(freq="Y-JAN", year=2008) + ival_Deoq_to_AJUN = Period(freq="Y-JUN", year=2007) + ival_Deoq_to_ADEC = Period(freq="Y-DEC", year=2007) + + ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4) + ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3) + ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1) + + ival_D_to_M = Period(freq="M", year=2007, month=1) + ival_D_to_W = Period(freq="W", year=2007, month=1, day=7) + + ival_D_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_D_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_D_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_D_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_D_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_D_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + + assert ival_D.asfreq("Y") == ival_D_to_A + + assert ival_D_end_of_quarter.asfreq("Y-JAN") == ival_Deoq_to_AJAN + assert ival_D_end_of_quarter.asfreq("Y-JUN") == ival_Deoq_to_AJUN + assert ival_D_end_of_quarter.asfreq("Y-DEC") == ival_Deoq_to_ADEC + + assert ival_D_end_of_year.asfreq("Y") == ival_D_to_A + assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC + assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN + assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN + assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC + assert ival_D.asfreq("M") == ival_D_to_M + assert ival_D_end_of_month.asfreq("M") == ival_D_to_M + assert ival_D.asfreq("W") == ival_D_to_W + assert ival_D_end_of_week.asfreq("W") == ival_D_to_W + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_D_friday.asfreq("B") == ival_B_friday + assert ival_D_saturday.asfreq("B", "s") == ival_B_friday + assert ival_D_saturday.asfreq("B", "E") == ival_B_monday + assert ival_D_sunday.asfreq("B", "s") == ival_B_friday + assert ival_D_sunday.asfreq("B", "E") == ival_B_monday + + assert ival_D.asfreq("h", "s") == ival_D_to_H_start + assert ival_D.asfreq("h", "E") == ival_D_to_H_end + assert ival_D.asfreq("Min", "s") == ival_D_to_T_start + assert ival_D.asfreq("Min", "E") == ival_D_to_T_end + assert ival_D.asfreq("s", "s") == ival_D_to_S_start + assert ival_D.asfreq("s", "E") == ival_D_to_S_end + + assert ival_D.asfreq("D") == ival_D + + def test_conv_hourly(self): + # frequency conversion tests: from Hourly Frequency" + + ival_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_H_end_of_year = Period(freq="h", year=2007, month=12, day=31, hour=23) + ival_H_end_of_quarter = Period(freq="h", year=2007, month=3, day=31, hour=23) + ival_H_end_of_month = Period(freq="h", year=2007, 
month=1, day=31, hour=23) + ival_H_end_of_week = Period(freq="h", year=2007, month=1, day=7, hour=23) + ival_H_end_of_day = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_H_end_of_bus = Period(freq="h", year=2007, month=1, day=1, hour=23) + + ival_H_to_A = Period(freq="Y", year=2007) + ival_H_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_H_to_M = Period(freq="M", year=2007, month=1) + ival_H_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_H_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_H_to_B = Period(freq="B", year=2007, month=1, day=1) + + ival_H_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_H_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=59 + ) + ival_H_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_H_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59 + ) + + assert ival_H.asfreq("Y") == ival_H_to_A + assert ival_H_end_of_year.asfreq("Y") == ival_H_to_A + assert ival_H.asfreq("Q") == ival_H_to_Q + assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q + assert ival_H.asfreq("M") == ival_H_to_M + assert ival_H_end_of_month.asfreq("M") == ival_H_to_M + assert ival_H.asfreq("W") == ival_H_to_W + assert ival_H_end_of_week.asfreq("W") == ival_H_to_W + assert ival_H.asfreq("D") == ival_H_to_D + assert ival_H_end_of_day.asfreq("D") == ival_H_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_H.asfreq("B") == ival_H_to_B + assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B + + assert ival_H.asfreq("Min", "s") == ival_H_to_T_start + assert ival_H.asfreq("Min", "E") == ival_H_to_T_end + assert ival_H.asfreq("s", "s") == ival_H_to_S_start + assert ival_H.asfreq("s", "E") == ival_H_to_S_end + + assert ival_H.asfreq("h") == ival_H + + def test_conv_minutely(self): + # frequency conversion tests: from Minutely Frequency" + + ival_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0) + ival_T_end_of_year = Period( + freq="Min", year=2007, month=12, day=31, hour=23, minute=59 + ) + ival_T_end_of_quarter = Period( + freq="Min", year=2007, month=3, day=31, hour=23, minute=59 + ) + ival_T_end_of_month = Period( + freq="Min", year=2007, month=1, day=31, hour=23, minute=59 + ) + ival_T_end_of_week = Period( + freq="Min", year=2007, month=1, day=7, hour=23, minute=59 + ) + ival_T_end_of_day = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_T_end_of_bus = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_T_end_of_hour = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=59 + ) + + ival_T_to_A = Period(freq="Y", year=2007) + ival_T_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_T_to_M = Period(freq="M", year=2007, month=1) + ival_T_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_T_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_T_to_B = Period(freq="B", year=2007, month=1, day=1) + ival_T_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + + ival_T_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_T_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59 + ) + + assert ival_T.asfreq("Y") == ival_T_to_A + assert ival_T_end_of_year.asfreq("Y") == 
ival_T_to_A + assert ival_T.asfreq("Q") == ival_T_to_Q + assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q + assert ival_T.asfreq("M") == ival_T_to_M + assert ival_T_end_of_month.asfreq("M") == ival_T_to_M + assert ival_T.asfreq("W") == ival_T_to_W + assert ival_T_end_of_week.asfreq("W") == ival_T_to_W + assert ival_T.asfreq("D") == ival_T_to_D + assert ival_T_end_of_day.asfreq("D") == ival_T_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_T.asfreq("B") == ival_T_to_B + assert ival_T_end_of_bus.asfreq("B") == ival_T_to_B + assert ival_T.asfreq("h") == ival_T_to_H + assert ival_T_end_of_hour.asfreq("h") == ival_T_to_H + + assert ival_T.asfreq("s", "s") == ival_T_to_S_start + assert ival_T.asfreq("s", "E") == ival_T_to_S_end + + assert ival_T.asfreq("Min") == ival_T + + def test_conv_secondly(self): + # frequency conversion tests: from Secondly Frequency" + + ival_S = Period(freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0) + ival_S_end_of_year = Period( + freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_quarter = Period( + freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_month = Period( + freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_week = Period( + freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59 + ) + ival_S_end_of_day = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + ival_S_end_of_bus = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + ival_S_end_of_hour = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59 + ) + ival_S_end_of_minute = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59 + ) + + ival_S_to_A = Period(freq="Y", year=2007) + ival_S_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_S_to_M = Period(freq="M", year=2007, month=1) + ival_S_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_S_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_S_to_B = Period(freq="B", year=2007, month=1, day=1) + ival_S_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_S_to_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0) + + assert ival_S.asfreq("Y") == ival_S_to_A + assert ival_S_end_of_year.asfreq("Y") == ival_S_to_A + assert ival_S.asfreq("Q") == ival_S_to_Q + assert ival_S_end_of_quarter.asfreq("Q") == ival_S_to_Q + assert ival_S.asfreq("M") == ival_S_to_M + assert ival_S_end_of_month.asfreq("M") == ival_S_to_M + assert ival_S.asfreq("W") == ival_S_to_W + assert ival_S_end_of_week.asfreq("W") == ival_S_to_W + assert ival_S.asfreq("D") == ival_S_to_D + assert ival_S_end_of_day.asfreq("D") == ival_S_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_S.asfreq("B") == ival_S_to_B + assert ival_S_end_of_bus.asfreq("B") == ival_S_to_B + assert ival_S.asfreq("h") == ival_S_to_H + assert ival_S_end_of_hour.asfreq("h") == ival_S_to_H + assert ival_S.asfreq("Min") == ival_S_to_T + assert ival_S_end_of_minute.asfreq("Min") == ival_S_to_T + + assert ival_S.asfreq("s") == ival_S + + def test_conv_microsecond(self): + # GH#31475 Avoid floating point errors dropping the start_time to + # before the beginning of the Period + per = Period("2020-01-30 15:57:27.576166", freq="us") + assert per.ordinal == 1580399847576166 + + start = per.start_time + 
expected = Timestamp("2020-01-30 15:57:27.576166") + assert start == expected + assert start._value == per.ordinal * 1000 + + per2 = Period("2300-01-01", "us") + msg = "2300-01-01" + with pytest.raises(OutOfBoundsDatetime, match=msg): + per2.start_time + with pytest.raises(OutOfBoundsDatetime, match=msg): + per2.end_time + + def test_asfreq_mult(self): + # normal freq to mult freq + p = Period(freq="Y", year=2007) + # ordinal will not change + for freq in ["3Y", offsets.YearEnd(3)]: + result = p.asfreq(freq) + expected = Period("2007", freq="3Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + # ordinal will not change + for freq in ["3Y", offsets.YearEnd(3)]: + result = p.asfreq(freq, how="S") + expected = Period("2007", freq="3Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + # mult freq to normal freq + p = Period(freq="3Y", year=2007) + # ordinal will change because how=E is the default + for freq in ["Y", offsets.YearEnd()]: + result = p.asfreq(freq) + expected = Period("2009", freq="Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + # ordinal will not change + for freq in ["Y", offsets.YearEnd()]: + result = p.asfreq(freq, how="s") + expected = Period("2007", freq="Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + p = Period(freq="Y", year=2007) + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period("2007-12", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq, how="s") + expected = Period("2007-01", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + p = Period(freq="3Y", year=2007) + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period("2009-12", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq, how="s") + expected = Period("2007-01", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + def test_asfreq_combined(self): + # normal freq to combined freq + p = Period("2007", freq="h") + + # ordinal will not change + expected = Period("2007", freq="25h") + for freq, how in zip(["1D1h", "1h1D"], ["E", "S"]): + result = p.asfreq(freq, how=how) + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + # combined freq to normal freq + p1 = Period(freq="1D1h", year=2007) + p2 = Period(freq="1h1D", year=2007) + + # ordinal will change because how=E is the default + result1 = p1.asfreq("h") + result2 = p2.asfreq("h") + expected = Period("2007-01-02", freq="h") + assert result1 == expected + assert result1.ordinal == expected.ordinal + assert result1.freq == expected.freq + assert result2 == expected + assert result2.ordinal == expected.ordinal + assert result2.freq == expected.freq + + # ordinal will not change + result1 = p1.asfreq("h", how="S") + result2 = p2.asfreq("h", how="S") + expected = Period("2007-01-01", freq="h") + assert result1 == expected + assert result1.ordinal == 
expected.ordinal + assert result1.freq == expected.freq + assert result2 == expected + assert result2.ordinal == expected.ordinal + assert result2.freq == expected.freq + + def test_asfreq_MS(self): + initial = Period("2013") + + assert initial.asfreq(freq="M", how="S") == Period("2013-01", "M") + + msg = "MS is not supported as period frequency" + with pytest.raises(ValueError, match=msg): + initial.asfreq(freq="MS", how="S") + + with pytest.raises(ValueError, match=msg): + Period("2013-01", "MS") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72757ea00d0a42eb79257236d999ca90383d6dfa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd77625a8212f461cdec166a585dcdb152226ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3b272c5430080c7aba9db94a8755cb6b9671c7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..447b112b28dada56898cd7456caa8ae29fd320ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7e5d019c1352d7d12fb2d6d069f392acf726b5d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72757ea00d0a42eb79257236d999ca90383d6dfa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bd77625a8212f461cdec166a585dcdb152226ce
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea3b272c5430080c7aba9db94a8755cb6b9671c7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..447b112b28dada56898cd7456caa8ae29fd320ea
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7e5d019c1352d7d12fb2d6d069f392acf726b5d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ddc360bc8a72d732bc6ecf43c8196d653498aa8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d159085a65626fa5e55c3201a87d149389c9e5d9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py
new file mode 100644
index 0000000000000000000000000000000000000000..8660141e5a5372d4bb8e921bc8b3a5ee148e8900
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py
@@ -0,0 +1,80 @@
+import pytest
+
+from pandas._libs.tslibs.dtypes import NpyDatetimeUnit
+from pandas.errors import OutOfBoundsTimedelta
+
+from pandas import Timedelta
+
+
+class TestAsUnit:
+    def test_as_unit(self):
+        td = Timedelta(days=1)
+
+        assert td.as_unit("ns") is td
+
+        res = td.as_unit("us")
+        assert res._value == td._value // 1000
+        assert res._creso == NpyDatetimeUnit.NPY_FR_us.value
+
+        rt = res.as_unit("ns")
+        assert rt._value == td._value
+        assert rt._creso == td._creso
+
+        res = td.as_unit("ms")
+        assert res._value == td._value // 1_000_000
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
+
+        rt = res.as_unit("ns")
+        assert rt._value == td._value
+        assert rt._creso == td._creso
+
+        res = td.as_unit("s")
+        assert res._value == td._value // 1_000_000_000
+        assert res._creso == NpyDatetimeUnit.NPY_FR_s.value
+
+        rt = res.as_unit("ns")
+        assert rt._value == td._value
+        assert rt._creso == td._creso
+
+    def test_as_unit_overflows(self):
+        # microsecond that would be just out of bounds for nano
+        us = 9223372800000000
+        td = Timedelta._from_value_and_reso(us, NpyDatetimeUnit.NPY_FR_us.value)
+
+        msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow"
+        with pytest.raises(OutOfBoundsTimedelta, match=msg):
+            td.as_unit("ns")
+
+        res = td.as_unit("ms")
+        assert res._value == us // 1000
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
+
+    def test_as_unit_rounding(self):
+        td = Timedelta(microseconds=1500)
+        res = td.as_unit("ms")
+
+        expected = Timedelta(milliseconds=1)
+        assert res == expected
+
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
+        assert res._value == 1
+
+        with pytest.raises(ValueError, match="Cannot losslessly convert units"):
+            td.as_unit("ms", round_ok=False)
+
+    def test_as_unit_non_nano(self):
+        # case where we are going neither to nor from nano
+        td = Timedelta(days=1).as_unit("ms")
+        assert td.days == 1
+        assert td._value == 86_400_000
+        assert td.components.days == 1
+        assert td._d == 1
+        assert td.total_seconds() == 86400
+
+        res = td.as_unit("us")
+        assert res._value == 86_400_000_000
+        assert res.components.days == 1
+        assert res.components.hours == 0
+        assert res._d == 1
+        assert res._h == 0
+        assert res.total_seconds() == 86400
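test_as_unit.py above documents Timedelta.as_unit: the integer payload (_value) is rescaled to the target resolution (_creso), lossy conversions round unless round_ok=False is passed, and conversions that cannot be represented raise OutOfBoundsTimedelta. A minimal sketch with the same values (note that _value and _creso are internal attributes, shown here only to mirror the assertions above):

from pandas import Timedelta

td = Timedelta(days=1)        # nanosecond resolution by default
ms = td.as_unit("ms")         # same duration at millisecond resolution
assert ms._value == 86_400_000
assert ms.total_seconds() == 86400

assert Timedelta(microseconds=1500).as_unit("ms") == Timedelta(milliseconds=1)  # rounded
# Timedelta(microseconds=1500).as_unit("ms", round_ok=False) raises
# ValueError("Cannot losslessly convert units")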
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54adb27d126bf13b454c513cecf847cdbb623bb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py
@@ -0,0 +1,187 @@
+from hypothesis import (
+    given,
+    strategies as st,
+)
+import numpy as np
+import pytest
+
+from pandas._libs import lib
+from pandas._libs.tslibs import iNaT
+from pandas.errors import OutOfBoundsTimedelta
+
+from pandas import Timedelta
+
+
+class TestTimedeltaRound:
+    @pytest.mark.parametrize(
+        "freq,s1,s2",
+        [
+            # This first case has s1, s2 being the same as t1,t2 below
+            (
+                "ns",
+                Timedelta("1 days 02:34:56.789123456"),
+                Timedelta("-1 days 02:34:56.789123456"),
+            ),
+            (
+                "us",
+                Timedelta("1 days 02:34:56.789123000"),
+                Timedelta("-1 days 02:34:56.789123000"),
+            ),
+            (
+                "ms",
+                Timedelta("1 days 02:34:56.789000000"),
+                Timedelta("-1 days 02:34:56.789000000"),
+            ),
+            ("s", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
+            ("2s", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
+            ("5s", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
+            ("min", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
+            ("12min", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
+            ("h", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
+            ("d", Timedelta("1 days"), Timedelta("-1 days")),
+        ],
+    )
+    def test_round(self, freq, s1, s2):
+        t1 = Timedelta("1 days 02:34:56.789123456")
+        t2 = Timedelta("-1 days 02:34:56.789123456")
+
+        r1 = t1.round(freq)
+        assert r1 == s1
+        r2 = t2.round(freq)
+        assert r2 == s2
+
+    def test_round_invalid(self):
+        t1 = Timedelta("1 days 02:34:56.789123456")
+
+        for freq, msg in [
+            ("YE", " is a non-fixed frequency"),
+            ("ME", " is a non-fixed frequency"),
+            ("foobar", "Invalid frequency: foobar"),
+        ]:
+            with pytest.raises(ValueError, match=msg):
+                t1.round(freq)
+
+    @pytest.mark.skip_ubsan
+    def test_round_implementation_bounds(self):
+        # See also: analogous test for Timestamp
+        # GH#38964
+        result = Timedelta.min.ceil("s")
+        expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
+        assert result == expected
+
+        result = Timedelta.max.floor("s")
+        expected = Timedelta.max - Timedelta(854775807)
+        assert result == expected
+
+        msg = (
+            r"Cannot round -106752 days \+00:12:43.145224193 to freq=s without overflow"
+        )
+        with pytest.raises(OutOfBoundsTimedelta, match=msg):
+            Timedelta.min.floor("s")
+        with pytest.raises(OutOfBoundsTimedelta, match=msg):
+            Timedelta.min.round("s")
+
+        msg = "Cannot round 106751 days 23:47:16.854775807 to freq=s without overflow"
+        with pytest.raises(OutOfBoundsTimedelta, match=msg):
+            Timedelta.max.ceil("s")
+        with pytest.raises(OutOfBoundsTimedelta, match=msg):
+            Timedelta.max.round("s")
+
+    @pytest.mark.skip_ubsan
+    @given(val=st.integers(min_value=iNaT + 1, max_value=lib.i8max))
+    @pytest.mark.parametrize(
+        "method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
+    )
+    def test_round_sanity(self, val, method):
+        cls = Timedelta
+        err_cls = OutOfBoundsTimedelta
+
+        val = np.int64(val)
+        td = cls(val)
+
+        def checker(ts, nanos, unit):
+            # First check that we do raise in cases where we should
+            if nanos == 1:
+                pass
+            else:
+                div, mod = divmod(ts._value, nanos)
+                diff = int(nanos - mod)
+                lb = ts._value - mod
+                assert lb <= ts._value  # i.e. no overflows with python ints
+                ub = ts._value + diff
+                assert ub > ts._value  # i.e. no overflows with python ints
+
+                msg = "without overflow"
+                if mod == 0:
+                    # We should never be raising in this
+                    pass
+                elif method is cls.ceil:
+                    if ub > cls.max._value:
+                        with pytest.raises(err_cls, match=msg):
+                            method(ts, unit)
+                        return
+                elif method is cls.floor:
+                    if lb < cls.min._value:
+                        with pytest.raises(err_cls, match=msg):
+                            method(ts, unit)
+                        return
+                elif mod >= diff:
+                    if ub > cls.max._value:
+                        with pytest.raises(err_cls, match=msg):
+                            method(ts, unit)
+                        return
+                elif lb < cls.min._value:
+                    with pytest.raises(err_cls, match=msg):
+                        method(ts, unit)
+                    return
+
+            res = method(ts, unit)
+
+            td = res - ts
+            diff = abs(td._value)
+            assert diff < nanos
+            assert res._value % nanos == 0
+
+            if method is cls.round:
+                assert diff <= nanos / 2
+            elif method is cls.floor:
+                assert res <= ts
+            elif method is cls.ceil:
+                assert res >= ts
+
+        nanos = 1
+        checker(td, nanos, "ns")
+
+        nanos = 1000
+        checker(td, nanos, "us")
+
+        nanos = 1_000_000
+        checker(td, nanos, "ms")
+
+        nanos = 1_000_000_000
+        checker(td, nanos, "s")
+
+        nanos = 60 * 1_000_000_000
+        checker(td, nanos, "min")
+
+        nanos = 60 * 60 * 1_000_000_000
+        checker(td, nanos, "h")
+
+        nanos = 24 * 60 * 60 * 1_000_000_000
+        checker(td, nanos, "D")
+
+    @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+    def test_round_non_nano(self, unit):
+        td = Timedelta("1 days 02:34:57").as_unit(unit)
+
+        res = td.round("min")
+        assert res == Timedelta("1 days 02:35:00")
+        assert res._creso == td._creso
+
+        res = td.floor("min")
+        assert res == Timedelta("1 days 02:34:00")
+        assert res._creso == td._creso
+
+        res = td.ceil("min")
+        assert res == Timedelta("1 days 02:35:00")
+        assert res._creso == td._creso
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_pydatetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_pydatetime.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59b37706466cc10acdab59fc9bfba3f6c738b9d5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/__pycache__/test_to_pydatetime.cpython-310.pyc differ
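test_round.py above covers Timedelta.round/floor/ceil with fixed frequencies, rejection of non-fixed frequencies, overflow at the implementation bounds, and non-nanosecond resolutions. A minimal sketch restating a few of the parametrized cases:

from pandas import Timedelta

t = Timedelta("1 days 02:34:56.789123456")
assert t.round("5s") == Timedelta("1 days 02:34:55")
assert t.round("min") == Timedelta("1 days 02:35:00")
assert t.round("h") == Timedelta("1 days 03:00:00")

# Non-fixed frequencies such as "ME" or "YE" raise ValueError, and rounding
# at Timedelta.min / Timedelta.max can raise OutOfBoundsTimedelta.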