diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0d42b6541fdf8817b996ef9804db8a87a2bcd2c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/__init__.py
@@ -0,0 +1,16 @@
+""" public toolkit API """
+from pandas.api import (
+    extensions,
+    indexers,
+    interchange,
+    types,
+    typing,
+)
+
+__all__ = [
+    "interchange",
+    "extensions",
+    "indexers",
+    "types",
+    "typing",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/extensions/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/extensions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5f1ba926899f9d11e34e70181ed77cae7ead1d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/extensions/__init__.py
@@ -0,0 +1,33 @@
+"""
+Public API for extending pandas objects.
+"""
+
+from pandas._libs.lib import no_default
+
+from pandas.core.dtypes.base import (
+    ExtensionDtype,
+    register_extension_dtype,
+)
+
+from pandas.core.accessor import (
+    register_dataframe_accessor,
+    register_index_accessor,
+    register_series_accessor,
+)
+from pandas.core.algorithms import take
+from pandas.core.arrays import (
+    ExtensionArray,
+    ExtensionScalarOpsMixin,
+)
+
+__all__ = [
+    "no_default",
+    "ExtensionDtype",
+    "register_extension_dtype",
+    "register_dataframe_accessor",
+    "register_index_accessor",
+    "register_series_accessor",
+    "take",
+    "ExtensionArray",
+    "ExtensionScalarOpsMixin",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/indexers/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/indexers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..78357f11dc3b79f13490b91c69ef5457fbfa9768
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/indexers/__init__.py
@@ -0,0 +1,17 @@
+"""
+Public API for Rolling Window Indexers.
+"""
+
+from pandas.core.indexers import check_array_indexer
+from pandas.core.indexers.objects import (
+    BaseIndexer,
+    FixedForwardWindowIndexer,
+    VariableOffsetWindowIndexer,
+)
+
+__all__ = [
+    "check_array_indexer",
+    "BaseIndexer",
+    "FixedForwardWindowIndexer",
+    "VariableOffsetWindowIndexer",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/interchange/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/interchange/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f3a73bc46b3109c3c13e1a3468a69aed2ffb2e8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/interchange/__init__.py
@@ -0,0 +1,8 @@
+"""
+Public API for DataFrame interchange protocol.
+"""
+
+from pandas.core.interchange.dataframe_protocol import DataFrame
+from pandas.core.interchange.from_dataframe import from_dataframe
+
+__all__ = ["from_dataframe", "DataFrame"]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/types/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c601086bb9f86a633d6a3f2245779fea9428bf95
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/types/__init__.py
@@ -0,0 +1,23 @@
+"""
+Public toolkit API.
+"""
+
+from pandas._libs.lib import infer_dtype
+
+from pandas.core.dtypes.api import *  # noqa: F403
+from pandas.core.dtypes.concat import union_categoricals
+from pandas.core.dtypes.dtypes import (
+    CategoricalDtype,
+    DatetimeTZDtype,
+    IntervalDtype,
+    PeriodDtype,
+)
+
+__all__ = [
+    "infer_dtype",
+    "union_categoricals",
+    "CategoricalDtype",
+    "DatetimeTZDtype",
+    "IntervalDtype",
+    "PeriodDtype",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/api/typing/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/api/typing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b5d2cb06b523508b15025be804a66daaaaf7a45
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/api/typing/__init__.py
@@ -0,0 +1,55 @@
+"""
+Public API classes that store intermediate results useful for type-hinting.
+"""
+
+from pandas._libs import NaTType
+from pandas._libs.missing import NAType
+
+from pandas.core.groupby import (
+    DataFrameGroupBy,
+    SeriesGroupBy,
+)
+from pandas.core.resample import (
+    DatetimeIndexResamplerGroupby,
+    PeriodIndexResamplerGroupby,
+    Resampler,
+    TimedeltaIndexResamplerGroupby,
+    TimeGrouper,
+)
+from pandas.core.window import (
+    Expanding,
+    ExpandingGroupby,
+    ExponentialMovingWindow,
+    ExponentialMovingWindowGroupby,
+    Rolling,
+    RollingGroupby,
+    Window,
+)
+
+# TODO: Can't import Styler without importing jinja2
+# from pandas.io.formats.style import Styler
+from pandas.io.json._json import JsonReader
+from pandas.io.stata import StataReader
+
+__all__ = [
+    "DataFrameGroupBy",
+    "DatetimeIndexResamplerGroupby",
+    "Expanding",
+    "ExpandingGroupby",
+    "ExponentialMovingWindow",
+    "ExponentialMovingWindowGroupby",
+    "JsonReader",
+    "NaTType",
+    "NAType",
+    "PeriodIndexResamplerGroupby",
+    "Resampler",
+    "Rolling",
+    "RollingGroupby",
+    "SeriesGroupBy",
+    "StataReader",
+    # See TODO above
+    # "Styler",
+    "TimedeltaIndexResamplerGroupby",
+    "TimeGrouper",
+    "Window",
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_arithmetic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_arithmetic.py
new file mode 100644
index 0000000000000000000000000000000000000000..5dc0858de466c136dd186f35505226aa5208de70
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_arithmetic.py
@@ -0,0 +1,486 @@
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.period import IncompatibleFrequency
+
+from pandas import (
+    NaT,
+    Period,
+    Timedelta,
+    Timestamp,
+    offsets,
+)
+
+
+class TestPeriodArithmetic:
+    def test_add_overflow_raises(self):
+        # GH#55503
+        per = Timestamp.max.to_period("ns")
+
+        msg = "|".join(
+            [
+                "Python int too large to convert to C long",
+                # windows, 32bit linux builds
+                "int too big to convert",
+            ]
+        )
+        with pytest.raises(OverflowError, match=msg):
+            per + 1
+
+        msg = "value too large"
+        with pytest.raises(OverflowError, match=msg):
+            per + Timedelta(1)
+        with pytest.raises(OverflowError, match=msg):
+            per + offsets.Nano(1)
+
+    def test_period_add_integer(self):
+        per1 = Period(freq="D", year=2008, month=1, day=1)
+        per2 = Period(freq="D", year=2008, month=1, day=2)
+        assert per1 + 1 == per2
+        assert 1 + per1 == per2
+
+    def test_period_add_invalid(self):
+        # GH#4731
+        per1 = Period(freq="D", year=2008, month=1, day=1)
+        per2 = Period(freq="D", year=2008, month=1, day=2)
+
+        msg = "|".join(
+            [
+                r"unsupported operand type\(s\)",
+                "can only concatenate str",
+                "must be str, not Period",
+            ]
+        )
+        with pytest.raises(TypeError, match=msg):
+            per1 + "str"
+        with pytest.raises(TypeError, match=msg):
+            "str" + per1
+        with pytest.raises(TypeError, match=msg):
+            per1 + per2
+
+    def test_period_sub_period_annual(self):
+        left, right = Period("2011", freq="Y"), Period("2007", freq="Y")
+        result = left - right
+        assert result == 4 * right.freq
+
+        msg = r"Input has different freq=M from Period\(freq=Y-DEC\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            left - Period("2007-01", freq="M")
+
+    def test_period_sub_period(self):
+        per1 = Period("2011-01-01", freq="D")
+        per2 = Period("2011-01-15", freq="D")
+
+        off = per1.freq
+        assert per1 - per2 == -14 * off
+        assert per2 - per1 == 14 * off
+
+        msg = r"Input has different freq=M from Period\(freq=D\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            per1 - Period("2011-02", freq="M")
+
+    @pytest.mark.parametrize("n", [1, 2, 3, 4])
+    def test_sub_n_gt_1_ticks(self, tick_classes, n):
+        # GH#23878
+        p1 = Period("19910905", freq=tick_classes(n))
+        p2 = Period("19920406", freq=tick_classes(n))
+
+        expected = Period(str(p2), freq=p2.freq.base) - Period(
+            str(p1), freq=p1.freq.base
+        )
+
+        assert (p2 - p1) == expected
+
+    @pytest.mark.parametrize("normalize", [True, False])
+    @pytest.mark.parametrize("n", [1, 2, 3, 4])
+    @pytest.mark.parametrize(
+        "offset, kwd_name",
+        [
+            (offsets.YearEnd, "month"),
+            (offsets.QuarterEnd, "startingMonth"),
+            (offsets.MonthEnd, None),
+            (offsets.Week, "weekday"),
+        ],
+    )
+    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
+        # GH#23878
+        kwds = {kwd_name: 3} if kwd_name is not None else {}
+        p1_d = "19910905"
+        p2_d = "19920406"
+        p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
+        p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
+
+        expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
+
+        assert (p2 - p1) == expected
+
+    def test_period_add_offset(self):
+        # freq is DateOffset
+        for freq in ["Y", "2Y", "3Y"]:
+            per = Period("2011", freq=freq)
+            exp = Period("2013", freq=freq)
+            assert per + offsets.YearEnd(2) == exp
+            assert offsets.YearEnd(2) + per == exp
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(365, "D"),
+                timedelta(365),
+            ]:
+                msg = "Input has different freq|Input cannot be converted to Period"
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per + off
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    off + per
+
+        for freq in ["M", "2M", "3M"]:
+            per = Period("2011-03", freq=freq)
+            exp = Period("2011-05", freq=freq)
+            assert per + offsets.MonthEnd(2) == exp
+            assert offsets.MonthEnd(2) + per == exp
+
+            exp = Period("2012-03", freq=freq)
+            assert per + offsets.MonthEnd(12) == exp
+            assert offsets.MonthEnd(12) + per == exp
+
+            msg = "|".join(
+                [
+                    "Input has different freq",
+                    "Input cannot be converted to Period",
+                ]
+            )
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(365, "D"),
+                timedelta(365),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per + off
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    off + per
+
+        # freq is Tick
+        for freq in ["D", "2D", "3D"]:
+            per = Period("2011-04-01", freq=freq)
+
+            exp = Period("2011-04-06", freq=freq)
+            assert per + offsets.Day(5) == exp
+            assert offsets.Day(5) + per == exp
+
+            exp = Period("2011-04-02", freq=freq)
+            assert per + offsets.Hour(24) == exp
+            assert offsets.Hour(24) + per == exp
+
+            exp = Period("2011-04-03", freq=freq)
+            assert per + np.timedelta64(2, "D") == exp
+            assert np.timedelta64(2, "D") + per == exp
+
+            exp = Period("2011-04-02", freq=freq)
+            assert per + np.timedelta64(3600 * 24, "s") == exp
+            assert np.timedelta64(3600 * 24, "s") + per == exp
+
+            exp = Period("2011-03-30", freq=freq)
+            assert per + timedelta(-2) == exp
+            assert timedelta(-2) + per == exp
+
+            exp = Period("2011-04-03", freq=freq)
+            assert per + timedelta(hours=48) == exp
+            assert timedelta(hours=48) + per == exp
+
+            msg = "|".join(
+                [
+                    "Input has different freq",
+                    "Input cannot be converted to Period",
+                ]
+            )
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(4, "h"),
+                timedelta(hours=23),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per + off
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    off + per
+
+        for freq in ["h", "2h", "3h"]:
+            per = Period("2011-04-01 09:00", freq=freq)
+
+            exp = Period("2011-04-03 09:00", freq=freq)
+            assert per + offsets.Day(2) == exp
+            assert offsets.Day(2) + per == exp
+
+            exp = Period("2011-04-01 12:00", freq=freq)
+            assert per + offsets.Hour(3) == exp
+            assert offsets.Hour(3) + per == exp
+
+            msg = "cannot use operands with types"
+            exp = Period("2011-04-01 12:00", freq=freq)
+            assert per + np.timedelta64(3, "h") == exp
+            assert np.timedelta64(3, "h") + per == exp
+
+            exp = Period("2011-04-01 10:00", freq=freq)
+            assert per + np.timedelta64(3600, "s") == exp
+            assert np.timedelta64(3600, "s") + per == exp
+
+            exp = Period("2011-04-01 11:00", freq=freq)
+            assert per + timedelta(minutes=120) == exp
+            assert timedelta(minutes=120) + per == exp
+
+            exp = Period("2011-04-05 12:00", freq=freq)
+            assert per + timedelta(days=4, minutes=180) == exp
+            assert timedelta(days=4, minutes=180) + per == exp
+
+            msg = "|".join(
+                [
+                    "Input has different freq",
+                    "Input cannot be converted to Period",
+                ]
+            )
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(3200, "s"),
+                timedelta(hours=23, minutes=30),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per + off
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    off + per
+
+    def test_period_sub_offset(self):
+        # freq is DateOffset
+        msg = "|".join(
+            [
+                "Input has different freq",
+                "Input cannot be converted to Period",
+            ]
+        )
+
+        for freq in ["Y", "2Y", "3Y"]:
+            per = Period("2011", freq=freq)
+            assert per - offsets.YearEnd(2) == Period("2009", freq=freq)
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(365, "D"),
+                timedelta(365),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per - off
+
+        for freq in ["M", "2M", "3M"]:
+            per = Period("2011-03", freq=freq)
+            assert per - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
+            assert per - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(365, "D"),
+                timedelta(365),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per - off
+
+        # freq is Tick
+        for freq in ["D", "2D", "3D"]:
+            per = Period("2011-04-01", freq=freq)
+            assert per - offsets.Day(5) == Period("2011-03-27", freq=freq)
+            assert per - offsets.Hour(24) == Period("2011-03-31", freq=freq)
+            assert per - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
+            assert per - np.timedelta64(3600 * 24, "s") == Period(
+                "2011-03-31", freq=freq
+            )
+            assert per - timedelta(-2) == Period("2011-04-03", freq=freq)
+            assert per - timedelta(hours=48) == Period("2011-03-30", freq=freq)
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(4, "h"),
+                timedelta(hours=23),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per - off
+
+        for freq in ["h", "2h", "3h"]:
+            per = Period("2011-04-01 09:00", freq=freq)
+            assert per - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
+            assert per - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
+            assert per - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
+            assert per - np.timedelta64(3600, "s") == Period(
+                "2011-04-01 08:00", freq=freq
+            )
+            assert per - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
+            assert per - timedelta(days=4, minutes=180) == Period(
+                "2011-03-28 06:00", freq=freq
+            )
+
+            for off in [
+                offsets.YearBegin(2),
+                offsets.MonthBegin(1),
+                offsets.Minute(),
+                np.timedelta64(3200, "s"),
+                timedelta(hours=23, minutes=30),
+            ]:
+                with pytest.raises(IncompatibleFrequency, match=msg):
+                    per - off
+
+    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
+    def test_period_addsub_nat(self, freq):
+        # GH#13071
+        per = Period("2011-01", freq=freq)
+
+        # For subtraction, NaT is treated as another Period object
+        assert NaT - per is NaT
+        assert per - NaT is NaT
+
+        # For addition, NaT is treated as offset-like
+        assert NaT + per is NaT
+        assert per + NaT is NaT
+
+    @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "m"])
+    def test_period_add_sub_td64_nat(self, unit):
+        # GH#47196
+        per = Period("2022-06-01", "D")
+        nat = np.timedelta64("NaT", unit)
+
+        assert per + nat is NaT
+        assert nat + per is NaT
+        assert per - nat is NaT
+
+        with pytest.raises(TypeError, match="unsupported operand"):
+            nat - per
+
+    def test_period_ops_offset(self):
+        per = Period("2011-04-01", freq="D")
+        result = per + offsets.Day()
+        exp = Period("2011-04-02", freq="D")
+        assert result == exp
+
+        result = per - offsets.Day(2)
+        exp = Period("2011-03-30", freq="D")
+        assert result == exp
+
+        msg = r"Input cannot be converted to Period\(freq=D\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            per + offsets.Hour(2)
+
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            per - offsets.Hour(2)
+
+    def test_period_add_timestamp_raises(self):
+        # GH#17983
+        ts = Timestamp("2017")
+        per = Period("2017", freq="M")
+
+        msg = r"unsupported operand type\(s\) for \+: 'Timestamp' and 'Period'"
+        with pytest.raises(TypeError, match=msg):
+            ts + per
+
+        msg = r"unsupported operand type\(s\) for \+: 'Period' and 'Timestamp'"
+        with pytest.raises(TypeError, match=msg):
+            per + ts
+
+
+class TestPeriodComparisons:
+    def test_period_comparison_same_freq(self):
+        jan = Period("2000-01", "M")
+        feb = Period("2000-02", "M")
+
+        assert not jan == feb
+        assert jan != feb
+        assert jan < feb
+        assert jan <= feb
+        assert not jan > feb
+        assert not jan >= feb
+
+    def test_period_comparison_same_period_different_object(self):
+        # Separate Period objects for the same period
+        left = Period("2000-01", "M")
+        right = Period("2000-01", "M")
+
+        assert left == right
+        assert left >= right
+        assert left <= right
+        assert not left < right
+        assert not left > right
+
+    def test_period_comparison_mismatched_freq(self):
+        jan = Period("2000-01", "M")
+        day = Period("2012-01-01", "D")
+
+        assert not jan == day
+        assert jan != day
+        msg = r"Input has different freq=D from Period\(freq=M\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            jan < day
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            jan <= day
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            jan > day
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            jan >= day
+
+    def test_period_comparison_invalid_type(self):
+        jan = Period("2000-01", "M")
+
+        assert not jan == 1
+        assert jan != 1
+
+        int_or_per = "'(Period|int)'"
+        msg = f"not supported between instances of {int_or_per} and {int_or_per}"
+        for left, right in [(jan, 1), (1, jan)]:
+            with pytest.raises(TypeError, match=msg):
+                left > right
+            with pytest.raises(TypeError, match=msg):
+                left >= right
+            with pytest.raises(TypeError, match=msg):
+                left < right
+            with pytest.raises(TypeError, match=msg):
+                left <= right
+
+    def test_period_comparison_nat(self):
+        per = Period("2011-01-01", freq="D")
+
+        ts = Timestamp("2011-01-01")
+        # confirm Period('NaT') work identical with Timestamp('NaT')
+        for left, right in [
+            (NaT, per),
+            (per, NaT),
+            (NaT, ts),
+            (ts, NaT),
+        ]:
+            assert not left < right
+            assert not left > right
+            assert not left == right
+            assert left != right
+            assert not left <= right
+            assert not left >= right
+
+    @pytest.mark.parametrize(
+        "zerodim_arr, expected",
+        ((np.array(0), False), (np.array(Period("2000-01", "M")), True)),
+    )
+    def test_period_comparison_numpy_zerodim_arr(self, zerodim_arr, expected):
+        per = Period("2000-01", "M")
+
+        assert (per == zerodim_arr) is expected
+        assert (zerodim_arr == per) is expected
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c3a0816737fccba83939c06238ec28a83550750
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py
@@ -0,0 +1,1154 @@
+from datetime import (
+    date,
+    datetime,
+    timedelta,
+)
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import iNaT
+from pandas._libs.tslibs.ccalendar import (
+    DAYS,
+    MONTHS,
+)
+from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+from pandas._libs.tslibs.parsing import DateParseError
+from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
+
+from pandas import (
+    NaT,
+    Period,
+    Timedelta,
+    Timestamp,
+    offsets,
+)
+import pandas._testing as tm
+
+bday_msg = "Period with BDay freq is deprecated"
+
+
+class TestPeriodDisallowedFreqs:
+    @pytest.mark.parametrize(
+        "freq, freq_msg",
+        [
+            (offsets.BYearBegin(), "BYearBegin"),
+            (offsets.YearBegin(2), "YearBegin"),
+            (offsets.QuarterBegin(startingMonth=12), "QuarterBegin"),
+            (offsets.BusinessMonthEnd(2), "BusinessMonthEnd"),
+        ],
+    )
+    def test_offsets_not_supported(self, freq, freq_msg):
+        # GH#55785
+        msg = re.escape(f"{freq} is not supported as period frequency")
+        with pytest.raises(ValueError, match=msg):
+            Period(year=2014, freq=freq)
+
+    def test_custom_business_day_freq_raises(self):
+        # GH#52534
+        msg = "C is not supported as period frequency"
+        with pytest.raises(ValueError, match=msg):
+            Period("2023-04-10", freq="C")
+        msg = f"{offsets.CustomBusinessDay().base} is not supported as period frequency"
+        with pytest.raises(ValueError, match=msg):
+            Period("2023-04-10", freq=offsets.CustomBusinessDay())
+
+    def test_invalid_frequency_error_message(self):
+        msg = "WOM-1MON is not supported as period frequency"
+        with pytest.raises(ValueError, match=msg):
+            Period("2012-01-02", freq="WOM-1MON")
+
+    def test_invalid_frequency_period_error_message(self):
+        msg = "for Period, please use 'M' instead of 'ME'"
+        with pytest.raises(ValueError, match=msg):
+            Period("2012-01-02", freq="ME")
+
+
+class TestPeriodConstruction:
+    def test_from_td64nat_raises(self):
+        # GH#44507
+        td = NaT.to_numpy("m8[ns]")
+
+        msg = "Value must be Period, string, integer, or datetime"
+        with pytest.raises(ValueError, match=msg):
+            Period(td)
+
+        with pytest.raises(ValueError, match=msg):
+            Period(td, freq="D")
+
+    def test_construction(self):
+        i1 = Period("1/1/2005", freq="M")
+        i2 = Period("Jan 2005")
+
+        assert i1 == i2
+
+        # GH#54105 - Period can be confusingly instantiated with lowercase freq
+        # TODO: raise in the future an error when passing lowercase freq
+        i1 = Period("2005", freq="Y")
+        i2 = Period("2005")
+
+        assert i1 == i2
+
+        i4 = Period("2005", freq="M")
+        assert i1 != i4
+
+        i1 = Period.now(freq="Q")
+        i2 = Period(datetime.now(), freq="Q")
+
+        assert i1 == i2
+
+        # Pass in freq as a keyword argument sometimes as a test for
+        # https://github.com/pandas-dev/pandas/issues/53369
+        i1 = Period.now(freq="D")
+        i2 = Period(datetime.now(), freq="D")
+        i3 = Period.now(offsets.Day())
+
+        assert i1 == i2
+        assert i1 == i3
+
+        i1 = Period("1982", freq="min")
+        msg = "'MIN' is deprecated and will be removed in a future version."
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            i2 = Period("1982", freq="MIN")
+        assert i1 == i2
+
+        i1 = Period(year=2005, month=3, day=1, freq="D")
+        i2 = Period("3/1/2005", freq="D")
+        assert i1 == i2
+
+        i3 = Period(year=2005, month=3, day=1, freq="d")
+        assert i1 == i3
+
+        i1 = Period("2007-01-01 09:00:00.001")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="ms")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.001", freq="ms")
+        assert i1 == expected
+
+        i1 = Period("2007-01-01 09:00:00.00101")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="us")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.00101", freq="us")
+        assert i1 == expected
+
+        msg = "Must supply freq for ordinal value"
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=200701)
+
+        msg = "Invalid frequency: X"
+        with pytest.raises(ValueError, match=msg):
+            Period("2007-1-1", freq="X")
+
+    def test_tuple_freq_disallowed(self):
+        # GH#34703 tuple freq disallowed
+        with pytest.raises(TypeError, match="pass as a string instead"):
+            Period("1982", freq=("Min", 1))
+
+        with pytest.raises(TypeError, match="pass as a string instead"):
+            Period("2006-12-31", ("w", 1))
+
+    def test_construction_from_timestamp_nanos(self):
+        # GH#46811 don't drop nanos from Timestamp
+        ts = Timestamp("2022-04-20 09:23:24.123456789")
+        per = Period(ts, freq="ns")
+
+        # should losslessly round-trip, not lose the 789
+        rt = per.to_timestamp()
+        assert rt == ts
+
+        # same thing but from a datetime64 object
+        dt64 = ts.asm8
+        per2 = Period(dt64, freq="ns")
+        rt2 = per2.to_timestamp()
+        assert rt2.asm8 == dt64
+
+    def test_construction_bday(self):
+        # Biz day construction, roll forward if non-weekday
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            i1 = Period("3/10/12", freq="B")
+            i2 = Period("3/10/12", freq="D")
+            assert i1 == i2.asfreq("B")
+            i2 = Period("3/11/12", freq="D")
+            assert i1 == i2.asfreq("B")
+            i2 = Period("3/12/12", freq="D")
+            assert i1 == i2.asfreq("B")
+
+            i3 = Period("3/10/12", freq="b")
+            assert i1 == i3
+
+            i1 = Period(year=2012, month=3, day=10, freq="B")
+            i2 = Period("3/12/12", freq="B")
+            assert i1 == i2
+
+    def test_construction_quarter(self):
+        i1 = Period(year=2005, quarter=1, freq="Q")
+        i2 = Period("1/1/2005", freq="Q")
+        assert i1 == i2
+
+        i1 = Period(year=2005, quarter=3, freq="Q")
+        i2 = Period("9/1/2005", freq="Q")
+        assert i1 == i2
+
+        i1 = Period("2005Q1")
+        i2 = Period(year=2005, quarter=1, freq="Q")
+        i3 = Period("2005q1")
+        assert i1 == i2
+        assert i1 == i3
+
+        i1 = Period("05Q1")
+        assert i1 == i2
+        lower = Period("05q1")
+        assert i1 == lower
+
+        i1 = Period("1Q2005")
+        assert i1 == i2
+        lower = Period("1q2005")
+        assert i1 == lower
+
+        i1 = Period("1Q05")
+        assert i1 == i2
+        lower = Period("1q05")
+        assert i1 == lower
+
+        i1 = Period("4Q1984")
+        assert i1.year == 1984
+        lower = Period("4q1984")
+        assert i1 == lower
+
+    def test_construction_month(self):
+        expected = Period("2007-01", freq="M")
+        i1 = Period("200701", freq="M")
+        assert i1 == expected
+
+        i1 = Period("200701", freq="M")
+        assert i1 == expected
+
+        i1 = Period(200701, freq="M")
+        assert i1 == expected
+
+        i1 = Period(ordinal=200701, freq="M")
+        assert i1.year == 18695
+
+        i1 = Period(datetime(2007, 1, 1), freq="M")
+        i2 = Period("200701", freq="M")
+        assert i1 == i2
+
+        i1 = Period(date(2007, 1, 1), freq="M")
+        i2 = Period(datetime(2007, 1, 1), freq="M")
+        i3 = Period(np.datetime64("2007-01-01"), freq="M")
+        i4 = Period("2007-01-01 00:00:00", freq="M")
+        i5 = Period("2007-01-01 00:00:00.000", freq="M")
+        assert i1 == i2
+        assert i1 == i3
+        assert i1 == i4
+        assert i1 == i5
+
+    def test_period_constructor_offsets(self):
+        assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
+            "1/1/2005", freq="M"
+        )
+        assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="Y")
+        assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
+                "3/10/12", freq="B"
+            )
+        assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
+
+        assert Period(
+            year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
+        ) == Period(year=2005, quarter=1, freq="Q")
+        assert Period(
+            year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
+        ) == Period(year=2005, quarter=2, freq="Q")
+
+        assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
+            year=2005, month=3, day=1, freq="D"
+        )
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
+                year=2012, month=3, day=10, freq="B"
+            )
+
+        expected = Period("2005-03-01", freq="3D")
+        assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
+        assert Period(year=2005, month=3, day=1, freq="3D") == expected
+
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
+                year=2012, month=3, day=10, freq="3B"
+            )
+
+        assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
+
+        i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
+        i2 = Period(ordinal=200701, freq="M")
+        assert i1 == i2
+        assert i1.year == 18695
+        assert i2.year == 18695
+
+        i1 = Period(datetime(2007, 1, 1), freq="M")
+        i2 = Period("200701", freq="M")
+        assert i1 == i2
+
+        i1 = Period(date(2007, 1, 1), freq="M")
+        i2 = Period(datetime(2007, 1, 1), freq="M")
+        i3 = Period(np.datetime64("2007-01-01"), freq="M")
+        i4 = Period("2007-01-01 00:00:00", freq="M")
+        i5 = Period("2007-01-01 00:00:00.000", freq="M")
+        assert i1 == i2
+        assert i1 == i3
+        assert i1 == i4
+        assert i1 == i5
+
+        i1 = Period("2007-01-01 09:00:00.001")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="ms")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.001", freq="ms")
+        assert i1 == expected
+
+        i1 = Period("2007-01-01 09:00:00.00101")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="us")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.00101", freq="us")
+        assert i1 == expected
+
+    def test_invalid_arguments(self):
+        msg = "Must supply freq for datetime value"
+        with pytest.raises(ValueError, match=msg):
+            Period(datetime.now())
+        with pytest.raises(ValueError, match=msg):
+            Period(datetime.now().date())
+
+        msg = "Value must be Period, string, integer, or datetime"
+        with pytest.raises(ValueError, match=msg):
+            Period(1.6, freq="D")
+        msg = "Ordinal must be an integer"
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1.6, freq="D")
+        msg = "Only value or ordinal but not both should be given but not both"
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=2, value=1, freq="D")
+
+        msg = "If value is None, freq cannot be None"
+        with pytest.raises(ValueError, match=msg):
+            Period(month=1)
+
+        msg = '^Given date string "-2000" not likely a datetime$'
+        with pytest.raises(ValueError, match=msg):
+            Period("-2000", "Y")
+        msg = "day is out of range for month"
+        with pytest.raises(DateParseError, match=msg):
+            Period("0", "Y")
+        msg = "Unknown datetime string format, unable to parse"
+        with pytest.raises(DateParseError, match=msg):
+            Period("1/1/-2000", "Y")
+
+    def test_constructor_corner(self):
+        expected = Period("2007-01", freq="2M")
+        assert Period(year=2007, month=1, freq="2M") == expected
+
+        assert Period(None) is NaT
+
+        p = Period("2007-01-01", freq="D")
+
+        result = Period(p, freq="Y")
+        exp = Period("2007", freq="Y")
+        assert result == exp
+
+    def test_constructor_infer_freq(self):
+        p = Period("2007-01-01")
+        assert p.freq == "D"
+
+        p = Period("2007-01-01 07")
+        assert p.freq == "h"
+
+        p = Period("2007-01-01 07:10")
+        assert p.freq == "min"
+
+        p = Period("2007-01-01 07:10:15")
+        assert p.freq == "s"
+
+        p = Period("2007-01-01 07:10:15.123")
+        assert p.freq == "ms"
+
+        # We see that there are 6 digits after the decimal, so get microsecond
+        # even though they are all zeros.
+        p = Period("2007-01-01 07:10:15.123000")
+        assert p.freq == "us"
+
+        p = Period("2007-01-01 07:10:15.123400")
+        assert p.freq == "us"
+
+    def test_multiples(self):
+        result1 = Period("1989", freq="2Y")
+        result2 = Period("1989", freq="Y")
+        assert result1.ordinal == result2.ordinal
+        assert result1.freqstr == "2Y-DEC"
+        assert result2.freqstr == "Y-DEC"
+        assert result1.freq == offsets.YearEnd(2)
+        assert result2.freq == offsets.YearEnd()
+
+        assert (result1 + 1).ordinal == result1.ordinal + 2
+        assert (1 + result1).ordinal == result1.ordinal + 2
+        assert (result1 - 1).ordinal == result2.ordinal - 2
+        assert (-1 + result1).ordinal == result2.ordinal - 2
+
+    @pytest.mark.parametrize("month", MONTHS)
+    def test_period_cons_quarterly(self, month):
+        # bugs in scikits.timeseries
+        freq = f"Q-{month}"
+        exp = Period("1989Q3", freq=freq)
+        assert "1989Q3" in str(exp)
+        stamp = exp.to_timestamp("D", how="end")
+        p = Period(stamp, freq=freq)
+        assert p == exp
+
+        stamp = exp.to_timestamp("3D", how="end")
+        p = Period(stamp, freq=freq)
+        assert p == exp
+
+    @pytest.mark.parametrize("month", MONTHS)
+    def test_period_cons_annual(self, month):
+        # bugs in scikits.timeseries
+        freq = f"Y-{month}"
+        exp = Period("1989", freq=freq)
+        stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
+        p = Period(stamp, freq=freq)
+
+        assert p == exp + 1
+        assert isinstance(p, Period)
+
+    @pytest.mark.parametrize("day", DAYS)
+    @pytest.mark.parametrize("num", range(10, 17))
+    def test_period_cons_weekly(self, num, day):
+        daystr = f"2011-02-{num}"
+        freq = f"W-{day}"
+
+        result = Period(daystr, freq=freq)
+        expected = Period(daystr, freq="D").asfreq(freq)
+        assert result == expected
+        assert isinstance(result, Period)
+
+    def test_parse_week_str_roundstrip(self):
+        # GH#50803
+        per = Period("2017-01-23/2017-01-29")
+        assert per.freq.freqstr == "W-SUN"
+
+        per = Period("2017-01-24/2017-01-30")
+        assert per.freq.freqstr == "W-MON"
+
+        msg = "Could not parse as weekly-freq Period"
+        with pytest.raises(ValueError, match=msg):
+            # not 6 days apart
+            Period("2016-01-23/2017-01-29")
+
+    def test_period_from_ordinal(self):
+        p = Period("2011-01", freq="M")
+        res = Period._from_ordinal(p.ordinal, freq=p.freq)
+        assert p == res
+        assert isinstance(res, Period)
+
+    @pytest.mark.parametrize("freq", ["Y", "M", "D", "h"])
+    def test_construct_from_nat_string_and_freq(self, freq):
+        per = Period("NaT", freq=freq)
+        assert per is NaT
+
+        per = Period("NaT", freq="2" + freq)
+        assert per is NaT
+
+        per = Period("NaT", freq="3" + freq)
+        assert per is NaT
+
+    def test_period_cons_nat(self):
+        p = Period("nat", freq="W-SUN")
+        assert p is NaT
+
+        p = Period(iNaT, freq="D")
+        assert p is NaT
+
+        p = Period(iNaT, freq="3D")
+        assert p is NaT
+
+        p = Period(iNaT, freq="1D1h")
+        assert p is NaT
+
+        p = Period("NaT")
+        assert p is NaT
+
+        p = Period(iNaT)
+        assert p is NaT
+
+    def test_period_cons_mult(self):
+        p1 = Period("2011-01", freq="3M")
+        p2 = Period("2011-01", freq="M")
+        assert p1.ordinal == p2.ordinal
+
+        assert p1.freq == offsets.MonthEnd(3)
+        assert p1.freqstr == "3M"
+
+        assert p2.freq == offsets.MonthEnd()
+        assert p2.freqstr == "M"
+
+        result = p1 + 1
+        assert result.ordinal == (p2 + 3).ordinal
+
+        assert result.freq == p1.freq
+        assert result.freqstr == "3M"
+
+        result = p1 - 1
+        assert result.ordinal == (p2 - 3).ordinal
+        assert result.freq == p1.freq
+        assert result.freqstr == "3M"
+
+        msg = "Frequency must be positive, because it represents span: -3M"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-3M")
+
+        msg = "Frequency must be positive, because it represents span: 0M"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="0M")
+
+    def test_period_cons_combined(self):
+        p = [
+            (
+                Period("2011-01", freq="1D1h"),
+                Period("2011-01", freq="1h1D"),
+                Period("2011-01", freq="h"),
+            ),
+            (
+                Period(ordinal=1, freq="1D1h"),
+                Period(ordinal=1, freq="1h1D"),
+                Period(ordinal=1, freq="h"),
+            ),
+        ]
+
+        for p1, p2, p3 in p:
+            assert p1.ordinal == p3.ordinal
+            assert p2.ordinal == p3.ordinal
+
+            assert p1.freq == offsets.Hour(25)
+            assert p1.freqstr == "25h"
+
+            assert p2.freq == offsets.Hour(25)
+            assert p2.freqstr == "25h"
+
+            assert p3.freq == offsets.Hour()
+            assert p3.freqstr == "h"
+
+            result = p1 + 1
+            assert result.ordinal == (p3 + 25).ordinal
+            assert result.freq == p1.freq
+            assert result.freqstr == "25h"
+
+            result = p2 + 1
+            assert result.ordinal == (p3 + 25).ordinal
+            assert result.freq == p2.freq
+            assert result.freqstr == "25h"
+
+            result = p1 - 1
+            assert result.ordinal == (p3 - 25).ordinal
+            assert result.freq == p1.freq
+            assert result.freqstr == "25h"
+
+            result = p2 - 1
+            assert result.ordinal == (p3 - 25).ordinal
+            assert result.freq == p2.freq
+            assert result.freqstr == "25h"
+
+        msg = "Frequency must be positive, because it represents span: -25h"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-1D1h")
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-1h1D")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="-1D1h")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="-1h1D")
+
+        msg = "Frequency must be positive, because it represents span: 0D"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="0D0h")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="0D0h")
+
+        # You can only combine together day and intraday offsets
+        msg = "Invalid frequency: 1W1D"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="1W1D")
+        msg = "Invalid frequency: 1D1W"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="1D1W")
+
+    @pytest.mark.parametrize("day", ["1970/01/01 ", "2020-12-31 ", "1981/09/13 "])
+    @pytest.mark.parametrize("hour", ["00:00:00", "00:00:01", "23:59:59", "12:00:59"])
+    @pytest.mark.parametrize(
+        "sec_float, expected",
+        [
+            (".000000001", 1),
+            (".000000999", 999),
+            (".123456789", 789),
+            (".999999999", 999),
+            (".999999000", 0),
+            # Test femtoseconds, attoseconds, picoseconds are dropped like Timestamp
+            (".999999001123", 1),
+            (".999999001123456", 1),
+            (".999999001123456789", 1),
+        ],
+    )
+    def test_period_constructor_nanosecond(self, day, hour, sec_float, expected):
+        # GH 34621
+
+        assert Period(day + hour + sec_float).start_time.nanosecond == expected
+
+    @pytest.mark.parametrize("hour", range(24))
+    def test_period_large_ordinal(self, hour):
+        # Issue #36430
+        # Integer overflow for Period over the maximum timestamp
+        p = Period(ordinal=2562048 + hour, freq="1h")
+        assert p.hour == hour
+
+
+class TestPeriodMethods:
+    def test_round_trip(self):
+        p = Period("2000Q1")
+        new_p = tm.round_trip_pickle(p)
+        assert new_p == p
+
+    def test_hash(self):
+        assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
+
+        assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
+
+        assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
+
+        assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
+
+    # --------------------------------------------------------------
+    # to_timestamp
+
+    def test_to_timestamp_mult(self):
+        p = Period("2011-01", freq="M")
+        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
+        expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
+        assert p.to_timestamp(how="E") == expected
+
+        p = Period("2011-01", freq="3M")
+        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
+        expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
+        assert p.to_timestamp(how="E") == expected
+
+    @pytest.mark.filterwarnings(
+        "ignore:Period with BDay freq is deprecated:FutureWarning"
+    )
+    def test_to_timestamp(self):
+        p = Period("1982", freq="Y")
+        start_ts = p.to_timestamp(how="S")
+        aliases = ["s", "StarT", "BEGIn"]
+        for a in aliases:
+            assert start_ts == p.to_timestamp("D", how=a)
+            # freq with mult should not affect to the result
+            assert start_ts == p.to_timestamp("3D", how=a)
+
+        end_ts = p.to_timestamp(how="E")
+        aliases = ["e", "end", "FINIsH"]
+        for a in aliases:
+            assert end_ts == p.to_timestamp("D", how=a)
+            assert end_ts == p.to_timestamp("3D", how=a)
+
+        from_lst = ["Y", "Q", "M", "W", "B", "D", "h", "Min", "s"]
+
+        def _ex(p):
+            if p.freq == "B":
+                return p.start_time + Timedelta(days=1, nanoseconds=-1)
+            return Timestamp((p + p.freq).start_time._value - 1)
+
+        for fcode in from_lst:
+            p = Period("1982", freq=fcode)
+            result = p.to_timestamp().to_period(fcode)
+            assert result == p
+
+            assert p.start_time == p.to_timestamp(how="S")
+
+            assert p.end_time == _ex(p)
+
+        # Frequency other than daily
+
+        p = Period("1985", freq="Y")
+
+        result = p.to_timestamp("h", how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+        result = p.to_timestamp("3h", how="end")
+        assert result == expected
+
+        result = p.to_timestamp("min", how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+        result = p.to_timestamp("2min", how="end")
+        assert result == expected
+
+        result = p.to_timestamp(how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+
+        expected = datetime(1985, 1, 1)
+        result = p.to_timestamp("h", how="start")
+        assert result == expected
+        result = p.to_timestamp("min", how="start")
+        assert result == expected
+        result = p.to_timestamp("s", how="start")
+        assert result == expected
+        result = p.to_timestamp("3h", how="start")
+        assert result == expected
+        result = p.to_timestamp("5s", how="start")
+        assert result == expected
+
+    def test_to_timestamp_business_end(self):
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            per = Period("1990-01-05", "B")  # Friday
+            result = per.to_timestamp("B", how="E")
+
+        expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "ts, expected",
+        [
+            ("1970-01-01 00:00:00", 0),
+            ("1970-01-01 00:00:00.000001", 1),
+            ("1970-01-01 00:00:00.00001", 10),
+            ("1970-01-01 00:00:00.499", 499000),
+            ("1999-12-31 23:59:59.999", 999000),
+            ("1999-12-31 23:59:59.999999", 999999),
+            ("2050-12-31 23:59:59.5", 500000),
+            ("2050-12-31 23:59:59.500001", 500001),
+            ("2050-12-31 23:59:59.123456", 123456),
+        ],
+    )
+    @pytest.mark.parametrize("freq", [None, "us", "ns"])
+    def test_to_timestamp_microsecond(self, ts, expected, freq):
+        # GH 24444
+        result = Period(ts).to_timestamp(freq=freq).microsecond
+        assert result == expected
+
+    # --------------------------------------------------------------
+    # Rendering: __repr__, strftime, etc
+ @pytest.mark.parametrize( + "str_ts,freq,str_res,str_freq", + ( + ("Jan-2000", None, "2000-01", "M"), + ("2000-12-15", None, "2000-12-15", "D"), + ( + "2000-12-15 13:45:26.123456789", + "ns", + "2000-12-15 13:45:26.123456789", + "ns", + ), + ("2000-12-15 13:45:26.123456789", "us", "2000-12-15 13:45:26.123456", "us"), + ("2000-12-15 13:45:26.123456", None, "2000-12-15 13:45:26.123456", "us"), + ("2000-12-15 13:45:26.123456789", "ms", "2000-12-15 13:45:26.123", "ms"), + ("2000-12-15 13:45:26.123", None, "2000-12-15 13:45:26.123", "ms"), + ("2000-12-15 13:45:26", "s", "2000-12-15 13:45:26", "s"), + ("2000-12-15 13:45:26", "min", "2000-12-15 13:45", "min"), + ("2000-12-15 13:45:26", "h", "2000-12-15 13:00", "h"), + ("2000-12-15", "Y", "2000", "Y-DEC"), + ("2000-12-15", "Q", "2000Q4", "Q-DEC"), + ("2000-12-15", "M", "2000-12", "M"), + ("2000-12-15", "W", "2000-12-11/2000-12-17", "W-SUN"), + ("2000-12-15", "D", "2000-12-15", "D"), + ("2000-12-15", "B", "2000-12-15", "B"), + ), + ) + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_repr(self, str_ts, freq, str_res, str_freq): + p = Period(str_ts, freq=freq) + assert str(p) == str_res + assert repr(p) == f"Period('{str_res}', '{str_freq}')" + + def test_repr_nat(self): + p = Period("nat", freq="M") + assert repr(NaT) in repr(p) + + def test_strftime(self): + # GH#3363 + p = Period("2000-1-1 12:34:12", freq="s") + res = p.strftime("%Y-%m-%d %H:%M:%S") + assert res == "2000-01-01 12:34:12" + assert isinstance(res, str) + + +class TestPeriodProperties: + """Test properties such as year, month, weekday, etc....""" + + @pytest.mark.parametrize("freq", ["Y", "M", "D", "h"]) + def test_is_leap_year(self, freq): + # GH 13727 + p = Period("2000-01-01 00:00:00", freq=freq) + assert p.is_leap_year + assert isinstance(p.is_leap_year, bool) + + p = Period("1999-01-01 00:00:00", freq=freq) + assert not p.is_leap_year + + p = Period("2004-01-01 00:00:00", freq=freq) + assert p.is_leap_year + + p = Period("2100-01-01 00:00:00", freq=freq) + assert not p.is_leap_year + + def test_quarterly_negative_ordinals(self): + p = Period(ordinal=-1, freq="Q-DEC") + assert p.year == 1969 + assert p.quarter == 4 + assert isinstance(p, Period) + + p = Period(ordinal=-2, freq="Q-DEC") + assert p.year == 1969 + assert p.quarter == 3 + assert isinstance(p, Period) + + p = Period(ordinal=-2, freq="M") + assert p.year == 1969 + assert p.month == 11 + assert isinstance(p, Period) + + def test_freq_str(self): + i1 = Period("1982", freq="Min") + assert i1.freq == offsets.Minute() + assert i1.freqstr == "min" + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_period_deprecated_freq(self): + cases = { + "M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"], + "B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"], + "D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"], + "h": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"], + "min": ["minute", "MINUTE", "MINUTELY", "minutely"], + "s": ["sec", "SEC", "SECOND", "SECONDLY", "second"], + "ms": ["MILLISECOND", "MILLISECONDLY", "millisecond"], + "us": ["MICROSECOND", "MICROSECONDLY", "microsecond"], + "ns": ["NANOSECOND", "NANOSECONDLY", "nanosecond"], + } + + msg = INVALID_FREQ_ERR_MSG + for exp, freqs in cases.items(): + for freq in freqs: + with pytest.raises(ValueError, match=msg): + Period("2016-03-01 09:00", freq=freq) + with pytest.raises(ValueError, match=msg): + Period(ordinal=1, freq=freq) + + # check 
supported freq-aliases still works + p1 = Period("2016-03-01 09:00", freq=exp) + p2 = Period(ordinal=1, freq=exp) + assert isinstance(p1, Period) + assert isinstance(p2, Period) + + @staticmethod + def _period_constructor(bound, offset): + return Period( + year=bound.year, + month=bound.month, + day=bound.day, + hour=bound.hour, + minute=bound.minute, + second=bound.second + offset, + freq="us", + ) + + @pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)]) + @pytest.mark.parametrize("period_property", ["start_time", "end_time"]) + def test_outer_bounds_start_and_end_time(self, bound, offset, period_property): + # GH #13346 + period = TestPeriodProperties._period_constructor(bound, offset) + with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond"): + getattr(period, period_property) + + @pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)]) + @pytest.mark.parametrize("period_property", ["start_time", "end_time"]) + def test_inner_bounds_start_and_end_time(self, bound, offset, period_property): + # GH #13346 + period = TestPeriodProperties._period_constructor(bound, -offset) + expected = period.to_timestamp().round(freq="s") + assert getattr(period, period_property).round(freq="s") == expected + expected = (bound - offset * Timedelta(1, unit="s")).floor("s") + assert getattr(period, period_property).floor("s") == expected + + def test_start_time(self): + freq_lst = ["Y", "Q", "M", "D", "h", "min", "s"] + xp = datetime(2012, 1, 1) + for f in freq_lst: + p = Period("2012", freq=f) + assert p.start_time == xp + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert Period("2012", freq="B").start_time == datetime(2012, 1, 2) + assert Period("2012", freq="W").start_time == datetime(2011, 12, 26) + + def test_end_time(self): + p = Period("2012", freq="Y") + + def _ex(*args): + return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1) + + xp = _ex(2013, 1, 1) + assert xp == p.end_time + + p = Period("2012", freq="Q") + xp = _ex(2012, 4, 1) + assert xp == p.end_time + + p = Period("2012", freq="M") + xp = _ex(2012, 2, 1) + assert xp == p.end_time + + p = Period("2012", freq="D") + xp = _ex(2012, 1, 2) + assert xp == p.end_time + + p = Period("2012", freq="h") + xp = _ex(2012, 1, 1, 1) + assert xp == p.end_time + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + p = Period("2012", freq="B") + xp = _ex(2012, 1, 3) + assert xp == p.end_time + + p = Period("2012", freq="W") + xp = _ex(2012, 1, 2) + assert xp == p.end_time + + # Test for GH 11738 + p = Period("2012", freq="15D") + xp = _ex(2012, 1, 16) + assert xp == p.end_time + + p = Period("2012", freq="1D1h") + xp = _ex(2012, 1, 2, 1) + assert xp == p.end_time + + p = Period("2012", freq="1h1D") + xp = _ex(2012, 1, 2, 1) + assert xp == p.end_time + + def test_end_time_business_friday(self): + # GH#34449 + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + per = Period("1990-01-05", "B") + result = per.end_time + + expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1) + assert result == expected + + def test_anchor_week_end_time(self): + def _ex(*args): + return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1) + + p = Period("2013-1-1", "W-SAT") + xp = _ex(2013, 1, 6) + assert p.end_time == xp + + def test_properties_annually(self): + # Test properties on Periods with annually frequency. 
+ a_date = Period(freq="Y", year=2007)
+ assert a_date.year == 2007
+
+ def test_properties_quarterly(self):
+ # Test properties on Periods with quarterly frequency.
+ qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
+ qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
+ qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
+ #
+ for x in range(3):
+ for qd in (qedec_date, qejan_date, qejun_date):
+ assert (qd + x).qyear == 2007
+ assert (qd + x).quarter == x + 1
+
+ def test_properties_monthly(self):
+ # Test properties on Periods with monthly frequency.
+ m_date = Period(freq="M", year=2007, month=1)
+ for x in range(11):
+ m_ival_x = m_date + x
+ assert m_ival_x.year == 2007
+ if 1 <= x + 1 <= 3:
+ assert m_ival_x.quarter == 1
+ elif 4 <= x + 1 <= 6:
+ assert m_ival_x.quarter == 2
+ elif 7 <= x + 1 <= 9:
+ assert m_ival_x.quarter == 3
+ elif 10 <= x + 1 <= 12:
+ assert m_ival_x.quarter == 4
+ assert m_ival_x.month == x + 1
+
+ def test_properties_weekly(self):
+ # Test properties on Periods with weekly frequency.
+ w_date = Period(freq="W", year=2007, month=1, day=7)
+ #
+ assert w_date.year == 2007
+ assert w_date.quarter == 1
+ assert w_date.month == 1
+ assert w_date.week == 1
+ assert (w_date - 1).week == 52
+ assert w_date.days_in_month == 31
+ assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
+
+ def test_properties_weekly_legacy(self):
+ # Test properties on Periods with weekly frequency.
+ w_date = Period(freq="W", year=2007, month=1, day=7)
+ assert w_date.year == 2007
+ assert w_date.quarter == 1
+ assert w_date.month == 1
+ assert w_date.week == 1
+ assert (w_date - 1).week == 52
+ assert w_date.days_in_month == 31
+
+ exp = Period(freq="W", year=2012, month=2, day=1)
+ assert exp.days_in_month == 29
+
+ msg = INVALID_FREQ_ERR_MSG
+ with pytest.raises(ValueError, match=msg):
+ Period(freq="WK", year=2007, month=1, day=7)
+
+ def test_properties_daily(self):
+ # Test properties on Periods with daily frequency.
+ with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+ b_date = Period(freq="B", year=2007, month=1, day=1)
+ #
+ assert b_date.year == 2007
+ assert b_date.quarter == 1
+ assert b_date.month == 1
+ assert b_date.day == 1
+ assert b_date.weekday == 0
+ assert b_date.dayofyear == 1
+ assert b_date.days_in_month == 31
+ with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+ assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
+
+ d_date = Period(freq="D", year=2007, month=1, day=1)
+
+ assert d_date.year == 2007
+ assert d_date.quarter == 1
+ assert d_date.month == 1
+ assert d_date.day == 1
+ assert d_date.weekday == 0
+ assert d_date.dayofyear == 1
+ assert d_date.days_in_month == 31
+ assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
+
+ def test_properties_hourly(self):
+ # Test properties on Periods with hourly frequency.
+ h_date1 = Period(freq="h", year=2007, month=1, day=1, hour=0)
+ h_date2 = Period(freq="2h", year=2007, month=1, day=1, hour=0)
+
+ for h_date in [h_date1, h_date2]:
+ assert h_date.year == 2007
+ assert h_date.quarter == 1
+ assert h_date.month == 1
+ assert h_date.day == 1
+ assert h_date.weekday == 0
+ assert h_date.dayofyear == 1
+ assert h_date.hour == 0
+ assert h_date.days_in_month == 31
+ assert (
+ Period(freq="h", year=2012, month=2, day=1, hour=0).days_in_month == 29
+ )
+
+ def test_properties_minutely(self):
+ # Test properties on Periods with minutely frequency.
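+ # A minutely Period still resolves the full date, so calendar-derived
+ # properties (quarter, weekday, dayofyear, days_in_month) are asserted
+ # alongside hour and minute.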
+ t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
+ #
+ assert t_date.quarter == 1
+ assert t_date.month == 1
+ assert t_date.day == 1
+ assert t_date.weekday == 0
+ assert t_date.dayofyear == 1
+ assert t_date.hour == 0
+ assert t_date.minute == 0
+ assert t_date.days_in_month == 31
+ assert (
+ Period(freq="Min", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
+ == 29
+ )
+
+ def test_properties_secondly(self):
+ # Test properties on Periods with secondly frequency.
+ s_date = Period(
+ freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0
+ )
+ #
+ assert s_date.year == 2007
+ assert s_date.quarter == 1
+ assert s_date.month == 1
+ assert s_date.day == 1
+ assert s_date.weekday == 0
+ assert s_date.dayofyear == 1
+ assert s_date.hour == 0
+ assert s_date.minute == 0
+ assert s_date.second == 0
+ assert s_date.days_in_month == 31
+ assert (
+ Period(
+ freq="s", year=2012, month=2, day=1, hour=0, minute=0, second=0
+ ).days_in_month
+ == 29
+ )
+
+
+class TestPeriodComparisons:
+ def test_sort_periods(self):
+ jan = Period("2000-01", "M")
+ feb = Period("2000-02", "M")
+ mar = Period("2000-03", "M")
+ periods = [mar, jan, feb]
+ correct_periods = [jan, feb, mar]
+ assert sorted(periods) == correct_periods
+
+
+def test_period_immutable():
+ # see gh-17116
+ msg = "not writable"
+
+ per = Period("2014Q1")
+ with pytest.raises(AttributeError, match=msg):
+ per.ordinal = 14
+
+ freq = per.freq
+ with pytest.raises(AttributeError, match=msg):
+ per.freq = 2 * freq
+
+
+def test_small_year_parsing():
+ per1 = Period("0001-01-07", "D")
+ assert per1.year == 1
+ assert per1.day == 7
+
+
+def test_negone_ordinals():
+ freqs = ["Y", "M", "Q", "D", "h", "min", "s"]
+
+ period = Period(ordinal=-1, freq="D")
+ for freq in freqs:
+ repr(period.asfreq(freq))
+
+ for freq in freqs:
+ period = Period(ordinal=-1, freq=freq)
+ repr(period)
+ assert period.year == 1969
+
+ with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+ period = Period(ordinal=-1, freq="B")
+ repr(period)
+ period = Period(ordinal=-1, freq="W")
+ repr(period)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py
new file mode 100644
index 0000000000000000000000000000000000000000..287b7557f50f9f6a81763f86d0eb616cfd730f8c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py
@@ -0,0 +1,316 @@
+from datetime import (
+ date,
+ time,
+ timedelta,
+)
+import pickle
+
+import numpy as np
+import pytest
+
+from pandas._libs.missing import NA
+
+from pandas.core.dtypes.common import is_scalar
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_singleton():
+ assert NA is NA
+ new_NA = type(NA)()
+ assert new_NA is NA
+
+
+def test_repr():
+ assert repr(NA) == "<NA>"
+ assert str(NA) == "<NA>"
+
+
+def test_format():
+ # GH-34740
+ assert format(NA) == "<NA>"
+ assert format(NA, ">10") == "      <NA>"
+ assert format(NA, "xxx") == "<NA>"  # NA is flexible, accept any format spec
+
+ assert f"{NA}" == "<NA>"
+ assert f"{NA:>10}" == "      <NA>"
+ assert f"{NA:xxx}" == "<NA>"
+
+
+def test_truthiness():
+ msg = "boolean value of NA is ambiguous"
+
+ with pytest.raises(TypeError, match=msg):
+ bool(NA)
+
+ with pytest.raises(TypeError, match=msg):
+ not NA
+
+
+def test_hashable():
+ assert hash(NA) == hash(NA)
+ d = {NA: "test"}
+ assert d[NA] == "test"
+
+
+@pytest.mark.parametrize(
+ "other", [NA, 1, 1.0, "a", b"a", np.int64(1), np.nan], ids=repr
+)
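+# NA propagates: apart from the skipped str/bytes cases, every op should
+# hand back the NA singleton (hence the ``is`` checks below); divmod and
+# rdivmod propagate NA into both elements of their result tuple.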
+def test_arithmetic_ops(all_arithmetic_functions, other):
+ op = all_arithmetic_functions
+
+ if op.__name__ in ("pow", "rpow", "rmod") and isinstance(other, (str, bytes)):
+ pytest.skip(reason=f"{op.__name__} with NA and {other} not defined.")
+ if op.__name__ in ("divmod", "rdivmod"):
+ # these return a 2-tuple; ``is (NA, NA)`` would compare against a
+ # freshly built tuple, so check each element's identity instead
+ result = op(NA, other)
+ assert result[0] is NA
+ assert result[1] is NA
+ else:
+ if op.__name__ == "rpow":
+ # avoid special case
+ other += 1
+ assert op(NA, other) is NA
+
+
+@pytest.mark.parametrize(
+ "other",
+ [
+ NA,
+ 1,
+ 1.0,
+ "a",
+ b"a",
+ np.int64(1),
+ np.nan,
+ np.bool_(True),
+ time(0),
+ date(1, 2, 3),
+ timedelta(1),
+ pd.NaT,
+ ],
+)
+def test_comparison_ops(comparison_op, other):
+ assert comparison_op(NA, other) is NA
+ assert comparison_op(other, NA) is NA
+
+
+@pytest.mark.parametrize(
+ "value",
+ [
+ 0,
+ 0.0,
+ -0,
+ -0.0,
+ False,
+ np.bool_(False),
+ np.int_(0),
+ np.float64(0),
+ np.int_(-0),
+ np.float64(-0),
+ ],
+)
+@pytest.mark.parametrize("asarray", [True, False])
+def test_pow_special(value, asarray):
+ if asarray:
+ value = np.array([value])
+ result = NA**value
+
+ if asarray:
+ result = result[0]
+ else:
+ # this assertion isn't possible for ndarray.
+ assert isinstance(result, type(value))
+ assert result == 1
+
+
+@pytest.mark.parametrize(
+ "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float64(1)]
+)
+@pytest.mark.parametrize("asarray", [True, False])
+def test_rpow_special(value, asarray):
+ if asarray:
+ value = np.array([value])
+ result = value**NA
+
+ if asarray:
+ result = result[0]
+ elif not isinstance(value, (np.float64, np.bool_, np.int_)):
+ # this assertion isn't possible with asarray=True
+ assert isinstance(result, type(value))
+
+ assert result == value
+
+
+@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float64(-1)])
+@pytest.mark.parametrize("asarray", [True, False])
+def test_rpow_minus_one(value, asarray):
+ if asarray:
+ value = np.array([value])
+ result = value**NA
+
+ if asarray:
+ result = result[0]
+
+ assert pd.isna(result)
+
+
+def test_unary_ops():
+ assert +NA is NA
+ assert -NA is NA
+ assert abs(NA) is NA
+ assert ~NA is NA
+
+
+def test_logical_and():
+ assert NA & True is NA
+ assert True & NA is NA
+ assert NA & False is False
+ assert False & NA is False
+ assert NA & NA is NA
+
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
+ NA & 5
+
+
+def test_logical_or():
+ assert NA | True is True
+ assert True | NA is True
+ assert NA | False is NA
+ assert False | NA is NA
+ assert NA | NA is NA
+
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
+ NA | 5
+
+
+def test_logical_xor():
+ assert NA ^ True is NA
+ assert True ^ NA is NA
+ assert NA ^ False is NA
+ assert False ^ NA is NA
+ assert NA ^ NA is NA
+
+ msg = "unsupported operand type"
+ with pytest.raises(TypeError, match=msg):
+ NA ^ 5
+
+
+def test_logical_not():
+ assert ~NA is NA
+
+
+@pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)])
+def test_arithmetic_ndarray(shape, all_arithmetic_functions):
+ op = all_arithmetic_functions
+ a = np.zeros(shape)
+ if op.__name__ == "pow":
+ a += 5
+ result = op(NA, a)
+ expected = np.full(a.shape, NA, dtype=object)
+ tm.assert_numpy_array_equal(result, expected)
+
+
+def test_is_scalar():
+ assert is_scalar(NA) is True
+
+
+def test_isna():
+ assert pd.isna(NA) is True
+ assert pd.notna(NA) is False
+
+
+def test_series_isna():
+ s = pd.Series([1, NA], dtype=object)
+ expected = pd.Series([False, True])
+ tm.assert_series_equal(s.isna(), expected)
+
+
+def test_ufunc():
+ assert 
np.log(NA) is NA + assert np.add(NA, 1) is NA + result = np.divmod(NA, 1) + assert result[0] is NA and result[1] is NA + + result = np.frexp(NA) + assert result[0] is NA and result[1] is NA + + +def test_ufunc_raises(): + msg = "ufunc method 'at'" + with pytest.raises(ValueError, match=msg): + np.log.at(NA, 0) + + +def test_binary_input_not_dunder(): + a = np.array([1, 2, 3]) + expected = np.array([NA, NA, NA], dtype=object) + result = np.logaddexp(a, NA) + tm.assert_numpy_array_equal(result, expected) + + result = np.logaddexp(NA, a) + tm.assert_numpy_array_equal(result, expected) + + # all NA, multiple inputs + assert np.logaddexp(NA, NA) is NA + + result = np.modf(NA, NA) + assert len(result) == 2 + assert all(x is NA for x in result) + + +def test_divmod_ufunc(): + # binary in, binary out. + a = np.array([1, 2, 3]) + expected = np.array([NA, NA, NA], dtype=object) + + result = np.divmod(a, NA) + assert isinstance(result, tuple) + for arr in result: + tm.assert_numpy_array_equal(arr, expected) + tm.assert_numpy_array_equal(arr, expected) + + result = np.divmod(NA, a) + for arr in result: + tm.assert_numpy_array_equal(arr, expected) + tm.assert_numpy_array_equal(arr, expected) + + +def test_integer_hash_collision_dict(): + # GH 30013 + result = {NA: "foo", hash(NA): "bar"} + + assert result[NA] == "foo" + assert result[hash(NA)] == "bar" + + +def test_integer_hash_collision_set(): + # GH 30013 + result = {NA, hash(NA)} + + assert len(result) == 2 + assert NA in result + assert hash(NA) in result + + +def test_pickle_roundtrip(): + # https://github.com/pandas-dev/pandas/issues/31847 + result = pickle.loads(pickle.dumps(NA)) + assert result is NA + + +def test_pickle_roundtrip_pandas(): + result = tm.round_trip_pickle(NA) + assert result is NA + + +@pytest.mark.parametrize( + "values, dtype", [([1, 2, NA], "Int64"), (["A", "B", NA], "string")] +) +@pytest.mark.parametrize("as_frame", [True, False]) +def test_pickle_roundtrip_containers(as_frame, values, dtype): + s = pd.Series(pd.array(values, dtype=dtype)) + if as_frame: + s = s.to_frame(name="A") + result = tm.round_trip_pickle(s) + tm.assert_equal(result, s) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py new file mode 100644 index 0000000000000000000000000000000000000000..cb046e0133245689f75c2def996bfabfb18e6185 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py @@ -0,0 +1,709 @@ +from datetime import ( + datetime, + timedelta, +) +import operator + +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import iNaT +from pandas.compat.numpy import np_version_gte1p24p3 + +from pandas import ( + DatetimeIndex, + DatetimeTZDtype, + Index, + NaT, + Period, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + isna, + offsets, +) +import pandas._testing as tm +from pandas.core import roperator +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) + + +class TestNaTFormatting: + def test_repr(self): + assert repr(NaT) == "NaT" + + def test_str(self): + assert str(NaT) == "NaT" + + def test_isoformat(self): + assert NaT.isoformat() == "NaT" + + +@pytest.mark.parametrize( + "nat,idx", + [ + (Timestamp("NaT"), DatetimeArray), + (Timedelta("NaT"), TimedeltaArray), + (Period("NaT", freq="M"), PeriodArray), + ], +) +def test_nat_fields(nat, idx): + for field in idx._field_ops: + # weekday is a property of DTI, but a method + # on 
NaT/Timestamp for compat with datetime + if field == "weekday": + continue + + result = getattr(NaT, field) + assert np.isnan(result) + + result = getattr(nat, field) + assert np.isnan(result) + + for field in idx._bool_ops: + result = getattr(NaT, field) + assert result is False + + result = getattr(nat, field) + assert result is False + + +def test_nat_vector_field_access(): + idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"]) + + for field in DatetimeArray._field_ops: + # weekday is a property of DTI, but a method + # on NaT/Timestamp for compat with datetime + if field == "weekday": + continue + + result = getattr(idx, field) + expected = Index([getattr(x, field) for x in idx]) + tm.assert_index_equal(result, expected) + + ser = Series(idx) + + for field in DatetimeArray._field_ops: + # weekday is a property of DTI, but a method + # on NaT/Timestamp for compat with datetime + if field == "weekday": + continue + + result = getattr(ser.dt, field) + expected = [getattr(x, field) for x in idx] + tm.assert_series_equal(result, Series(expected)) + + for field in DatetimeArray._bool_ops: + result = getattr(ser.dt, field) + expected = [getattr(x, field) for x in idx] + tm.assert_series_equal(result, Series(expected)) + + +@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) +@pytest.mark.parametrize( + "value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat", "", "NAT"] +) +def test_identity(klass, value): + assert klass(value) is NaT + + +@pytest.mark.parametrize("klass", [Timestamp, Timedelta]) +@pytest.mark.parametrize("method", ["round", "floor", "ceil"]) +@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"]) +def test_round_nat(klass, method, freq): + # see gh-14940 + ts = klass("nat") + + round_method = getattr(ts, method) + assert round_method(freq) is ts + + +@pytest.mark.parametrize( + "method", + [ + "astimezone", + "combine", + "ctime", + "dst", + "fromordinal", + "fromtimestamp", + "fromisocalendar", + "isocalendar", + "strftime", + "strptime", + "time", + "timestamp", + "timetuple", + "timetz", + "toordinal", + "tzname", + "utcfromtimestamp", + "utcnow", + "utcoffset", + "utctimetuple", + "timestamp", + ], +) +def test_nat_methods_raise(method): + # see gh-9513, gh-17329 + msg = f"NaTType does not support {method}" + + with pytest.raises(ValueError, match=msg): + getattr(NaT, method)() + + +@pytest.mark.parametrize("method", ["weekday", "isoweekday"]) +def test_nat_methods_nan(method): + # see gh-9513, gh-17329 + assert np.isnan(getattr(NaT, method)()) + + +@pytest.mark.parametrize( + "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"] +) +def test_nat_methods_nat(method): + # see gh-8254, gh-9513, gh-17329 + assert getattr(NaT, method)() is NaT + + +@pytest.mark.parametrize( + "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)] +) +def test_nat_iso_format(get_nat): + # see gh-12300 + assert get_nat("NaT").isoformat() == "NaT" + assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT" + + +@pytest.mark.parametrize( + "klass,expected", + [ + (Timestamp, ["normalize", "to_julian_date", "to_period", "unit"]), + ( + Timedelta, + [ + "components", + "resolution_string", + "to_pytimedelta", + "to_timedelta64", + "unit", + "view", + ], + ), + ], +) +def test_missing_public_nat_methods(klass, expected): + # see gh-17327 + # + # NaT should have *most* of the Timestamp and Timedelta methods. + # Here, we check which public methods NaT does not have. We + # ignore any missing private methods. 
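+ # Diff the public namespaces: names present on the class but missing
+ # from NaT (ignoring underscore-prefixed ones) must match ``expected``.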
+ nat_names = dir(NaT) + klass_names = dir(klass) + + missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")] + missing.sort() + + assert missing == expected + + +def _get_overlap_public_nat_methods(klass, as_tuple=False): + """ + Get overlapping public methods between NaT and another class. + + Parameters + ---------- + klass : type + The class to compare with NaT + as_tuple : bool, default False + Whether to return a list of tuples of the form (klass, method). + + Returns + ------- + overlap : list + """ + nat_names = dir(NaT) + klass_names = dir(klass) + + overlap = [ + x + for x in nat_names + if x in klass_names and not x.startswith("_") and callable(getattr(klass, x)) + ] + + # Timestamp takes precedence over Timedelta in terms of overlap. + if klass is Timedelta: + ts_names = dir(Timestamp) + overlap = [x for x in overlap if x not in ts_names] + + if as_tuple: + overlap = [(klass, method) for method in overlap] + + overlap.sort() + return overlap + + +@pytest.mark.parametrize( + "klass,expected", + [ + ( + Timestamp, + [ + "as_unit", + "astimezone", + "ceil", + "combine", + "ctime", + "date", + "day_name", + "dst", + "floor", + "fromisocalendar", + "fromisoformat", + "fromordinal", + "fromtimestamp", + "isocalendar", + "isoformat", + "isoweekday", + "month_name", + "now", + "replace", + "round", + "strftime", + "strptime", + "time", + "timestamp", + "timetuple", + "timetz", + "to_datetime64", + "to_numpy", + "to_pydatetime", + "today", + "toordinal", + "tz_convert", + "tz_localize", + "tzname", + "utcfromtimestamp", + "utcnow", + "utcoffset", + "utctimetuple", + "weekday", + ], + ), + (Timedelta, ["total_seconds"]), + ], +) +def test_overlap_public_nat_methods(klass, expected): + # see gh-17327 + # + # NaT should have *most* of the Timestamp and Timedelta methods. + # In case when Timestamp, Timedelta, and NaT are overlap, the overlap + # is considered to be with Timestamp and NaT, not Timedelta. + assert _get_overlap_public_nat_methods(klass) == expected + + +@pytest.mark.parametrize( + "compare", + ( + _get_overlap_public_nat_methods(Timestamp, True) + + _get_overlap_public_nat_methods(Timedelta, True) + ), + ids=lambda x: f"{x[0].__name__}.{x[1]}", +) +def test_nat_doc_strings(compare): + # see gh-17327 + # + # The docstrings for overlapping methods should match. 
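+ # Each overlapping public method is expected to reuse the host class's
+ # docstring verbatim; the skips below cover intentional divergences.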
+ klass, method = compare + klass_doc = getattr(klass, method).__doc__ + + if klass == Timestamp and method == "isoformat": + pytest.skip( + "Ignore differences with Timestamp.isoformat() as they're intentional" + ) + + if method == "to_numpy": + # GH#44460 can return either dt64 or td64 depending on dtype, + # different docstring is intentional + pytest.skip(f"different docstring for {method} is intentional") + + nat_doc = getattr(NaT, method).__doc__ + assert klass_doc == nat_doc + + +_ops = { + "left_plus_right": lambda a, b: a + b, + "right_plus_left": lambda a, b: b + a, + "left_minus_right": lambda a, b: a - b, + "right_minus_left": lambda a, b: b - a, + "left_times_right": lambda a, b: a * b, + "right_times_left": lambda a, b: b * a, + "left_div_right": lambda a, b: a / b, + "right_div_left": lambda a, b: b / a, +} + + +@pytest.mark.parametrize("op_name", list(_ops.keys())) +@pytest.mark.parametrize( + "value,val_type", + [ + (2, "scalar"), + (1.5, "floating"), + (np.nan, "floating"), + ("foo", "str"), + (timedelta(3600), "timedelta"), + (Timedelta("5s"), "timedelta"), + (datetime(2014, 1, 1), "timestamp"), + (Timestamp("2014-01-01"), "timestamp"), + (Timestamp("2014-01-01", tz="UTC"), "timestamp"), + (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"), + (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"), + ], +) +def test_nat_arithmetic_scalar(op_name, value, val_type): + # see gh-6873 + invalid_ops = { + "scalar": {"right_div_left"}, + "floating": { + "right_div_left", + "left_minus_right", + "right_minus_left", + "left_plus_right", + "right_plus_left", + }, + "str": set(_ops.keys()), + "timedelta": {"left_times_right", "right_times_left"}, + "timestamp": { + "left_times_right", + "right_times_left", + "left_div_right", + "right_div_left", + }, + } + + op = _ops[op_name] + + if op_name in invalid_ops.get(val_type, set()): + if ( + val_type == "timedelta" + and "times" in op_name + and isinstance(value, Timedelta) + ): + typs = "(Timedelta|NaTType)" + msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'" + elif val_type == "str": + # un-specific check here because the message comes from str + # and varies by method + msg = "|".join( + [ + "can only concatenate str", + "unsupported operand type", + "can't multiply sequence", + "Can't convert 'NaTType'", + "must be str, not NaTType", + ] + ) + else: + msg = "unsupported operand type" + + with pytest.raises(TypeError, match=msg): + op(NaT, value) + else: + if val_type == "timedelta" and "div" in op_name: + expected = np.nan + else: + expected = NaT + + assert op(NaT, value) is expected + + +@pytest.mark.parametrize( + "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)] +) +def test_nat_rfloordiv_timedelta(val, expected): + # see gh-#18846 + # + # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv + td = Timedelta(hours=3, minutes=4) + assert td // val is expected + + +@pytest.mark.parametrize( + "op_name", + ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], +) +@pytest.mark.parametrize( + "value", + [ + DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), + DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), + DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], dtype="M8[ns]"), + DatetimeArray._from_sequence( + ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific") + ), + TimedeltaIndex(["1 day", "2 day"], name="x"), + ], +) +def test_nat_arithmetic_index(op_name, value): + # see 
gh-11718 + exp_name = "x" + exp_data = [NaT] * 2 + + if value.dtype.kind == "M" and "plus" in op_name: + expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) + else: + expected = TimedeltaIndex(exp_data, name=exp_name) + expected = expected.as_unit(value.unit) + + if not isinstance(value, Index): + expected = expected.array + + op = _ops[op_name] + result = op(NaT, value) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op_name", + ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], +) +@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) +def test_nat_arithmetic_td64_vector(op_name, box): + # see gh-19124 + vec = box(["1 day", "2 day"], dtype="timedelta64[ns]") + box_nat = box([NaT, NaT], dtype="timedelta64[ns]") + tm.assert_equal(_ops[op_name](vec, NaT), box_nat) + + +@pytest.mark.parametrize( + "dtype,op,out_dtype", + [ + ("datetime64[ns]", operator.add, "datetime64[ns]"), + ("datetime64[ns]", roperator.radd, "datetime64[ns]"), + ("datetime64[ns]", operator.sub, "timedelta64[ns]"), + ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"), + ("timedelta64[ns]", operator.add, "datetime64[ns]"), + ("timedelta64[ns]", roperator.radd, "datetime64[ns]"), + ("timedelta64[ns]", operator.sub, "datetime64[ns]"), + ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"), + ], +) +def test_nat_arithmetic_ndarray(dtype, op, out_dtype): + other = np.arange(10).astype(dtype) + result = op(NaT, other) + + expected = np.empty(other.shape, dtype=out_dtype) + expected.fill("NaT") + tm.assert_numpy_array_equal(result, expected) + + +def test_nat_pinned_docstrings(): + # see gh-17327 + assert NaT.ctime.__doc__ == Timestamp.ctime.__doc__ + + +def test_to_numpy_alias(): + # GH 24653: alias .to_numpy() for scalars + expected = NaT.to_datetime64() + result = NaT.to_numpy() + + assert isna(expected) and isna(result) + + # GH#44460 + result = NaT.to_numpy("M8[s]") + assert isinstance(result, np.datetime64) + assert result.dtype == "M8[s]" + + result = NaT.to_numpy("m8[ns]") + assert isinstance(result, np.timedelta64) + assert result.dtype == "m8[ns]" + + result = NaT.to_numpy("m8[s]") + assert isinstance(result, np.timedelta64) + assert result.dtype == "m8[s]" + + with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "): + NaT.to_numpy(np.int64) + + +@pytest.mark.parametrize( + "other", + [ + Timedelta(0), + Timedelta(0).to_pytimedelta(), + pytest.param( + Timedelta(0).to_timedelta64(), + marks=pytest.mark.xfail( + not np_version_gte1p24p3, + reason="td64 doesn't return NotImplemented, see numpy#17017", + # When this xfail is fixed, test_nat_comparisons_numpy + # can be removed. 
+ ), + ), + Timestamp(0), + Timestamp(0).to_pydatetime(), + pytest.param( + Timestamp(0).to_datetime64(), + marks=pytest.mark.xfail( + not np_version_gte1p24p3, + reason="dt64 doesn't return NotImplemented, see numpy#17017", + ), + ), + Timestamp(0).tz_localize("UTC"), + NaT, + ], +) +def test_nat_comparisons(compare_operators_no_eq_ne, other): + # GH 26039 + opname = compare_operators_no_eq_ne + + assert getattr(NaT, opname)(other) is False + + op = getattr(operator, opname.strip("_")) + assert op(NaT, other) is False + assert op(other, NaT) is False + + +@pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")]) +def test_nat_comparisons_numpy(other): + # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons + # pass, this test can be removed + assert not NaT == other + assert NaT != other + assert not NaT < other + assert not NaT > other + assert not NaT <= other + assert not NaT >= other + + +@pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")]) +@pytest.mark.parametrize( + "symbol_and_op", + [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)], +) +def test_nat_comparisons_invalid(other_and_type, symbol_and_op): + # GH#35585 + other, other_type = other_and_type + symbol, op = symbol_and_op + + assert not NaT == other + assert not other == NaT + + assert NaT != other + assert other != NaT + + msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'" + with pytest.raises(TypeError, match=msg): + op(NaT, other) + + msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'" + with pytest.raises(TypeError, match=msg): + op(other, NaT) + + +@pytest.mark.parametrize( + "other", + [ + np.array(["foo"] * 2, dtype=object), + np.array([2, 3], dtype="int64"), + np.array([2.0, 3.5], dtype="float64"), + ], + ids=["str", "int", "float"], +) +def test_nat_comparisons_invalid_ndarray(other): + # GH#40722 + expected = np.array([False, False]) + result = NaT == other + tm.assert_numpy_array_equal(result, expected) + result = other == NaT + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([True, True]) + result = NaT != other + tm.assert_numpy_array_equal(result, expected) + result = other != NaT + tm.assert_numpy_array_equal(result, expected) + + for symbol, op in [ + ("<=", operator.le), + ("<", operator.lt), + (">=", operator.ge), + (">", operator.gt), + ]: + msg = f"'{symbol}' not supported between" + + with pytest.raises(TypeError, match=msg): + op(NaT, other) + + if other.dtype == np.dtype("object"): + # uses the reverse operator, so symbol changes + msg = None + with pytest.raises(TypeError, match=msg): + op(other, NaT) + + +def test_compare_date(fixed_now_ts): + # GH#39151 comparing NaT with date object is deprecated + # See also: tests.scalar.timestamps.test_comparisons::test_compare_date + + dt = fixed_now_ts.to_pydatetime().date() + + msg = "Cannot compare NaT with datetime.date object" + for left, right in [(NaT, dt), (dt, NaT)]: + assert not left == right + assert left != right + + with pytest.raises(TypeError, match=msg): + left < right + with pytest.raises(TypeError, match=msg): + left <= right + with pytest.raises(TypeError, match=msg): + left > right + with pytest.raises(TypeError, match=msg): + left >= right + + +@pytest.mark.parametrize( + "obj", + [ + offsets.YearEnd(2), + offsets.YearBegin(2), + offsets.MonthBegin(1), + offsets.MonthEnd(2), + offsets.MonthEnd(12), + offsets.Day(2), + offsets.Day(5), + 
offsets.Hour(24), + offsets.Hour(3), + offsets.Minute(), + np.timedelta64(3, "h"), + np.timedelta64(4, "h"), + np.timedelta64(3200, "s"), + np.timedelta64(3600, "s"), + np.timedelta64(3600 * 24, "s"), + np.timedelta64(2, "D"), + np.timedelta64(365, "D"), + timedelta(-2), + timedelta(365), + timedelta(minutes=120), + timedelta(days=4, minutes=180), + timedelta(hours=23), + timedelta(hours=23, minutes=30), + timedelta(hours=48), + ], +) +def test_nat_addsub_tdlike_scalar(obj): + assert NaT + obj is NaT + assert obj + NaT is NaT + assert NaT - obj is NaT + + +def test_pickle(): + # GH#4606 + p = tm.round_trip_pickle(NaT) + assert p is NaT diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db8e69095c405b739df0b75e21da40c48734bf6a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..d2fa0f722ca6fa9d8c671896c68032799c0f1166 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -0,0 +1,1182 @@ +""" +Tests for scalar Timedelta arithmetic ops +""" +from datetime import ( + datetime, + timedelta, +) +import operator + +import numpy as np +import pytest + +from pandas.errors import OutOfBoundsTimedelta + +import pandas as pd +from pandas import ( + NaT, + Timedelta, + Timestamp, + offsets, +) +import pandas._testing as tm +from pandas.core import ops + + +class TestTimedeltaAdditionSubtraction: + """ + Tests for Timedelta methods: + + __add__, __radd__, + __sub__, __rsub__ + """ + + @pytest.mark.parametrize( + "ten_seconds", + [ + Timedelta(10, unit="s"), + timedelta(seconds=10), + np.timedelta64(10, "s"), + np.timedelta64(10000000000, "ns"), + offsets.Second(10), + ], + ) + def test_td_add_sub_ten_seconds(self, ten_seconds): + # GH#6808 + base = Timestamp("20130101 09:01:12.123456") + expected_add = Timestamp("20130101 09:01:22.123456") + expected_sub = Timestamp("20130101 09:01:02.123456") + + result = base + ten_seconds + assert result == expected_add + + result = base - ten_seconds + assert result == expected_sub + + @pytest.mark.parametrize( + "one_day_ten_secs", + [ + Timedelta("1 day, 00:00:10"), + Timedelta("1 days, 00:00:10"), + timedelta(days=1, seconds=10), + np.timedelta64(1, "D") + np.timedelta64(10, "s"), + offsets.Day() + offsets.Second(10), + ], + ) + def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs): + # GH#6808 + base = Timestamp("20130102 09:01:12.123456") + expected_add = Timestamp("20130103 09:01:22.123456") + expected_sub = Timestamp("20130101 09:01:02.123456") + + result = base + one_day_ten_secs + assert result == expected_add + + result = base - one_day_ten_secs + assert result == expected_sub + + 
@pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_datetimelike_scalar(self, op): + # GH#19738 + td = Timedelta(10, unit="d") + + result = op(td, datetime(2016, 1, 1)) + if op is operator.add: + # datetime + Timedelta does _not_ call Timedelta.__radd__, + # so we get a datetime back instead of a Timestamp + assert isinstance(result, Timestamp) + assert result == Timestamp(2016, 1, 11) + + result = op(td, Timestamp("2018-01-12 18:09")) + assert isinstance(result, Timestamp) + assert result == Timestamp("2018-01-22 18:09") + + result = op(td, np.datetime64("2018-01-12")) + assert isinstance(result, Timestamp) + assert result == Timestamp("2018-01-22") + + result = op(td, NaT) + assert result is NaT + + def test_td_add_timestamp_overflow(self): + ts = Timestamp("1700-01-01").as_unit("ns") + msg = "Cannot cast 259987 from D to 'ns' without overflow." + with pytest.raises(OutOfBoundsTimedelta, match=msg): + ts + Timedelta(13 * 19999, unit="D") + + msg = "Cannot cast 259987 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + ts + timedelta(days=13 * 19999) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_td(self, op): + td = Timedelta(10, unit="d") + + result = op(td, Timedelta(days=10)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=20) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_pytimedelta(self, op): + td = Timedelta(10, unit="d") + result = op(td, timedelta(days=9)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=19) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_timedelta64(self, op): + td = Timedelta(10, unit="d") + result = op(td, np.timedelta64(-4, "D")) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=6) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_offset(self, op): + td = Timedelta(10, unit="d") + + result = op(td, offsets.Hour(6)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=10, hours=6) + + def test_td_sub_td(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, unit="ns") + result = td - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_pytimedelta(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, unit="ns") + + result = td - td.to_pytimedelta() + assert isinstance(result, Timedelta) + assert result == expected + + result = td.to_pytimedelta() - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_timedelta64(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, unit="ns") + + result = td - td.to_timedelta64() + assert isinstance(result, Timedelta) + assert result == expected + + result = td.to_timedelta64() - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_nat(self): + # In this context pd.NaT is treated as timedelta-like + td = Timedelta(10, unit="d") + result = td - NaT + assert result is NaT + + def test_td_sub_td64_nat(self): + td = Timedelta(10, unit="d") + td_nat = np.timedelta64("NaT") + + result = td - td_nat + assert result is NaT + + result = td_nat - td + assert result is NaT + + def test_td_sub_offset(self): + td = Timedelta(10, unit="d") + result = td - offsets.Hour(1) + assert isinstance(result, Timedelta) + assert result == Timedelta(239, unit="h") + + def test_td_add_sub_numeric_raises(self): + td = Timedelta(10, 
unit="d") + msg = "unsupported operand type" + for other in [2, 2.0, np.int64(2), np.float64(2)]: + with pytest.raises(TypeError, match=msg): + td + other + with pytest.raises(TypeError, match=msg): + other + td + with pytest.raises(TypeError, match=msg): + td - other + with pytest.raises(TypeError, match=msg): + other - td + + def test_td_add_sub_int_ndarray(self): + td = Timedelta("1 day") + other = np.array([1]) + + msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td + np.array([1]) + + msg = "|".join( + [ + ( + r"unsupported operand type\(s\) for \+: 'numpy.ndarray' " + "and 'Timedelta'" + ), + # This message goes on to say "Please do not rely on this error; + # it may not be given on all Python implementations" + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + with pytest.raises(TypeError, match=msg): + other + td + msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td - other + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + other - td + + def test_td_rsub_nat(self): + td = Timedelta(10, unit="d") + result = NaT - td + assert result is NaT + + result = np.datetime64("NaT") - td + assert result is NaT + + def test_td_rsub_offset(self): + result = offsets.Hour(1) - Timedelta(10, unit="d") + assert isinstance(result, Timedelta) + assert result == Timedelta(-239, unit="h") + + def test_td_sub_timedeltalike_object_dtype_array(self): + # GH#21980 + arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]) + exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")]) + res = arr - Timedelta("1D") + tm.assert_numpy_array_equal(res, exp) + + def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")]) + exp = np.array( + [ + now - Timedelta("1D"), + Timedelta("0D"), + np.timedelta64(2, "h") - Timedelta("1D"), + ] + ) + res = arr - Timedelta("1D") + tm.assert_numpy_array_equal(res, exp) + + def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")]) + msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + Timedelta("1D") - arr + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_timedeltalike_object_dtype_array(self, op): + # GH#21980 + arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]) + exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")]) + res = op(arr, Timedelta("1D")) + tm.assert_numpy_array_equal(res, exp) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_mixed_timedeltalike_object_dtype_array(self, op): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D")]) + exp = np.array([now + Timedelta("1D"), Timedelta("2D")]) + res = op(arr, Timedelta("1D")) + tm.assert_numpy_array_equal(res, exp) + + def test_td_add_sub_td64_ndarray(self): + td = Timedelta("1 day") + + other = np.array([td.to_timedelta64()]) + expected = np.array([Timedelta("2 Days").to_timedelta64()]) + + result = td + other + tm.assert_numpy_array_equal(result, expected) + result = other + td + 
tm.assert_numpy_array_equal(result, expected) + + result = td - other + tm.assert_numpy_array_equal(result, expected * 0) + result = other - td + tm.assert_numpy_array_equal(result, expected * 0) + + def test_td_add_sub_dt64_ndarray(self): + td = Timedelta("1 day") + other = np.array(["2000-01-01"], dtype="M8[ns]") + + expected = np.array(["2000-01-02"], dtype="M8[ns]") + tm.assert_numpy_array_equal(td + other, expected) + tm.assert_numpy_array_equal(other + td, expected) + + expected = np.array(["1999-12-31"], dtype="M8[ns]") + tm.assert_numpy_array_equal(-td + other, expected) + tm.assert_numpy_array_equal(other - td, expected) + + def test_td_add_sub_ndarray_0d(self): + td = Timedelta("1 day") + other = np.array(td.asm8) + + result = td + other + assert isinstance(result, Timedelta) + assert result == 2 * td + + result = other + td + assert isinstance(result, Timedelta) + assert result == 2 * td + + result = other - td + assert isinstance(result, Timedelta) + assert result == 0 * td + + result = td - other + assert isinstance(result, Timedelta) + assert result == 0 * td + + +class TestTimedeltaMultiplicationDivision: + """ + Tests for Timedelta methods: + + __mul__, __rmul__, + __div__, __rdiv__, + __truediv__, __rtruediv__, + __floordiv__, __rfloordiv__, + __mod__, __rmod__, + __divmod__, __rdivmod__ + """ + + # --------------------------------------------------------------- + # Timedelta.__mul__, __rmul__ + + @pytest.mark.parametrize( + "td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")] + ) + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_nat(self, op, td_nat): + # GH#19819 + td = Timedelta(10, unit="d") + typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"]) + msg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'", + r"ufunc '?multiply'? 
cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=msg): + op(td, td_nat) + + @pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")]) + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_nan(self, op, nan): + # np.float64('NaN') has a 'dtype' attr, avoid treating as array + td = Timedelta(10, unit="d") + result = op(td, nan) + assert result is NaT + + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_scalar(self, op): + # GH#19738 + td = Timedelta(minutes=3) + + result = op(td, 2) + assert result == Timedelta(minutes=6) + + result = op(td, 1.5) + assert result == Timedelta(minutes=4, seconds=30) + + assert op(td, np.nan) is NaT + + assert op(-1, td)._value == -1 * td._value + assert op(-1.0, td)._value == -1.0 * td._value + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + # timedelta * datetime is gibberish + op(td, Timestamp(2016, 1, 2)) + + with pytest.raises(TypeError, match=msg): + # invalid multiply with another timedelta + op(td, td) + + def test_td_mul_numeric_ndarray(self): + td = Timedelta("1 day") + other = np.array([2]) + expected = np.array([Timedelta("2 Days").to_timedelta64()]) + + result = td * other + tm.assert_numpy_array_equal(result, expected) + + result = other * td + tm.assert_numpy_array_equal(result, expected) + + def test_td_mul_numeric_ndarray_0d(self): + td = Timedelta("1 day") + other = np.array(2) + assert other.ndim == 0 + expected = Timedelta("2 days") + + res = td * other + assert type(res) is Timedelta + assert res == expected + + res = other * td + assert type(res) is Timedelta + assert res == expected + + def test_td_mul_td64_ndarray_invalid(self): + td = Timedelta("1 day") + other = np.array([Timedelta("2 Days").to_timedelta64()]) + + msg = ( + "ufunc '?multiply'? 
cannot use operands with types " + rf"dtype\('{tm.ENDIAN}m8\[ns\]'\) and dtype\('{tm.ENDIAN}m8\[ns\]'\)" + ) + with pytest.raises(TypeError, match=msg): + td * other + with pytest.raises(TypeError, match=msg): + other * td + + # --------------------------------------------------------------- + # Timedelta.__div__, __truediv__ + + def test_td_div_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + + result = td / offsets.Hour(1) + assert result == 240 + + assert td / td == 1 + assert td / np.timedelta64(60, "h") == 4 + + assert np.isnan(td / NaT) + + def test_td_div_td64_non_nano(self): + # truediv + td = Timedelta("1 days 2 hours 3 ns") + result = td / np.timedelta64(1, "D") + assert result == td._value / (86400 * 10**9) + result = td / np.timedelta64(1, "s") + assert result == td._value / 10**9 + result = td / np.timedelta64(1, "ns") + assert result == td._value + + # floordiv + td = Timedelta("1 days 2 hours 3 ns") + result = td // np.timedelta64(1, "D") + assert result == 1 + result = td // np.timedelta64(1, "s") + assert result == 93600 + result = td // np.timedelta64(1, "ns") + assert result == td._value + + def test_td_div_numeric_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + + result = td / 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=5) + + result = td / 5 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=2) + + @pytest.mark.parametrize( + "nan", + [ + np.nan, + np.float64("NaN"), + float("nan"), + ], + ) + def test_td_div_nan(self, nan): + # np.float64('NaN') has a 'dtype' attr, avoid treating as array + td = Timedelta(10, unit="d") + result = td / nan + assert result is NaT + + result = td // nan + assert result is NaT + + def test_td_div_td64_ndarray(self): + td = Timedelta("1 day") + + other = np.array([Timedelta("2 Days").to_timedelta64()]) + expected = np.array([0.5]) + + result = td / other + tm.assert_numpy_array_equal(result, expected) + + result = other / td + tm.assert_numpy_array_equal(result, expected * 4) + + def test_td_div_ndarray_0d(self): + td = Timedelta("1 day") + + other = np.array(1) + res = td / other + assert isinstance(res, Timedelta) + assert res == td + + # --------------------------------------------------------------- + # Timedelta.__rdiv__ + + def test_td_rdiv_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + result = offsets.Hour(1) / td + assert result == 1 / 240.0 + + assert np.timedelta64(60, "h") / td == 0.25 + + def test_td_rdiv_na_scalar(self): + # GH#31869 None gets cast to NaT + td = Timedelta(10, unit="d") + + result = NaT / td + assert np.isnan(result) + + result = None / td + assert np.isnan(result) + + result = np.timedelta64("NaT") / td + assert np.isnan(result) + + msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + np.datetime64("NaT") / td + + msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + np.nan / td + + def test_td_rdiv_ndarray(self): + td = Timedelta(10, unit="d") + + arr = np.array([td], dtype=object) + result = arr / td + expected = np.array([1], dtype=np.float64) + tm.assert_numpy_array_equal(result, expected) + + arr = np.array([None]) + result = arr / td + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + + arr = np.array([np.nan], dtype=object) + msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'" + with 
pytest.raises(TypeError, match=msg): + arr / td + + arr = np.array([np.nan], dtype=np.float64) + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + arr / td + + def test_td_rdiv_ndarray_0d(self): + td = Timedelta(10, unit="d") + + arr = np.array(td.asm8) + + assert arr / td == 1 + + # --------------------------------------------------------------- + # Timedelta.__floordiv__ + + def test_td_floordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + assert td // scalar == 1 + assert -td // scalar.to_pytimedelta() == -2 + assert (2 * td) // scalar.to_timedelta64() == 2 + + def test_td_floordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + assert td // np.nan is NaT + assert np.isnan(td // NaT) + assert np.isnan(td // np.timedelta64("NaT")) + + def test_td_floordiv_offsets(self): + # GH#19738 + td = Timedelta(hours=3, minutes=4) + assert td // offsets.Hour(1) == 3 + assert td // offsets.Minute(2) == 92 + + def test_td_floordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + msg = "|".join( + [ + r"Invalid dtype datetime64\[D\] for __floordiv__", + "'dtype' is an invalid keyword argument for this function", + r"ufunc '?floor_divide'? cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=msg): + td // np.datetime64("2016-01-01", dtype="datetime64[us]") + + def test_td_floordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + expected = Timedelta(hours=1, minutes=32) + assert td // 2 == expected + assert td // 2.0 == expected + assert td // np.float64(2.0) == expected + assert td // np.int32(2.0) == expected + assert td // np.uint8(2.0) == expected + + def test_td_floordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + # Array-like others + assert td // np.array(scalar.to_timedelta64()) == 1 + + res = (3 * td) // np.array([scalar.to_timedelta64()]) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")]) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_floordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + ser = pd.Series([1], dtype=np.int64) + res = td // ser + assert res.dtype.kind == "m" + + # --------------------------------------------------------------- + # Timedelta.__rfloordiv__ + + def test_td_rfloordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # scalar others + # x // Timedelta is defined only for timedelta-like x. int-like, + # float-like, and date-like, in particular, should all either + # a) raise TypeError directly or + # b) return NotImplemented, following which the reversed + # operation will raise TypeError. 
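+ # Worked example: td is 183 min and scalar is 184 min, so
+ # 184 // 183 == 1, 184 // -183 == -2 (floor rounds toward -inf),
+ # and 184 // 366 == 0.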
+ assert td.__rfloordiv__(scalar) == 1 + assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 + assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 + + def test_td_rfloordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert np.isnan(td.__rfloordiv__(NaT)) + assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT"))) + + def test_td_rfloordiv_offsets(self): + # GH#19738 + assert offsets.Hour(1) // Timedelta(minutes=25) == 2 + + def test_td_rfloordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + dt64 = np.datetime64("2016-01-01", "us") + + assert td.__rfloordiv__(dt64) is NotImplemented + + msg = ( + r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'" + ) + with pytest.raises(TypeError, match=msg): + dt64 // td + + def test_td_rfloordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert td.__rfloordiv__(np.nan) is NotImplemented + assert td.__rfloordiv__(3.5) is NotImplemented + assert td.__rfloordiv__(2) is NotImplemented + assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented + assert td.__rfloordiv__(np.uint8(9)) is NotImplemented + assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented + + msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta" + with pytest.raises(TypeError, match=msg): + np.float64(2.0) // td + with pytest.raises(TypeError, match=msg): + np.uint8(9) // td + with pytest.raises(TypeError, match=msg): + # deprecated GH#19761, enforced GH#29797 + np.int32(2.0) // td + + def test_td_rfloordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # Array-like others + assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1 + + res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()])) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")]) + res = td.__rfloordiv__(arr) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_rfloordiv_intarray(self): + # deprecated GH#19761, enforced GH#29797 + ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9 + + msg = "Invalid dtype" + with pytest.raises(TypeError, match=msg): + ints // Timedelta(1, unit="s") + + def test_td_rfloordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + ser = pd.Series([1], dtype=np.int64) + res = td.__rfloordiv__(ser) + assert res is NotImplemented + + msg = "Invalid dtype" + with pytest.raises(TypeError, match=msg): + # Deprecated GH#19761, enforced GH#29797 + ser // td + + # ---------------------------------------------------------------- + # Timedelta.__mod__, __rmod__ + + def test_mod_timedeltalike(self): + # GH#19365 + td = Timedelta(hours=37) + + # Timedelta-like others + result = td % Timedelta(hours=6) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + result = td % timedelta(minutes=60) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % NaT + assert result is NaT + + def test_mod_timedelta64_nat(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % np.timedelta64("NaT", "ns") + assert result is NaT + + def test_mod_timedelta64(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % np.timedelta64(2, "h") + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + def 
test_mod_offset(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % offsets.Hour(5) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=2) + + def test_mod_numeric(self): + # GH#19365 + td = Timedelta(hours=37) + + # Numeric Others + result = td % 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % 1e12 + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + result = td % int(1e12) + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + def test_mod_invalid(self): + # GH#19365 + td = Timedelta(hours=37) + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + td % Timestamp("2018-01-22") + + with pytest.raises(TypeError, match=msg): + td % [] + + def test_rmod_pytimedelta(self): + # GH#19365 + td = Timedelta(minutes=3) + + result = timedelta(minutes=4) % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=1) + + def test_rmod_timedelta64(self): + # GH#19365 + td = Timedelta(minutes=3) + result = np.timedelta64(5, "m") % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=2) + + def test_rmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + msg = "unsupported operand" + with pytest.raises(TypeError, match=msg): + Timestamp("2018-01-22") % td + + with pytest.raises(TypeError, match=msg): + 15 % td + + with pytest.raises(TypeError, match=msg): + 16.0 % td + + msg = "Invalid dtype int" + with pytest.raises(TypeError, match=msg): + np.array([22, 24]) % td + + # ---------------------------------------------------------------- + # Timedelta.__divmod__, __rdivmod__ + + def test_divmod_numeric(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, 53 * 3600 * 1e9) + assert result[0] == Timedelta(1, unit="ns") + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=1) + + assert result + result = divmod(td, np.nan) + assert result[0] is NaT + assert result[1] is NaT + + def test_divmod(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + result = divmod(td, 54) + assert result[0] == Timedelta(hours=1) + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(0) + + result = divmod(td, NaT) + assert np.isnan(result[0]) + assert result[1] is NaT + + def test_divmod_offset(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, offsets.Hour(-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_divmod_invalid(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + divmod(td, Timestamp("2018-01-22")) + + def test_rdivmod_pytimedelta(self): + # GH#19365 + result = divmod(timedelta(days=2, hours=6), Timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + def test_rdivmod_offset(self): + result = divmod(offsets.Hour(54), Timedelta(hours=-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_rdivmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + msg = "unsupported operand type" + + with 
pytest.raises(TypeError, match=msg): + divmod(Timestamp("2018-01-22"), td) + + with pytest.raises(TypeError, match=msg): + divmod(15, td) + + with pytest.raises(TypeError, match=msg): + divmod(16.0, td) + + msg = "Invalid dtype int" + with pytest.raises(TypeError, match=msg): + divmod(np.array([22, 24]), td) + + # ---------------------------------------------------------------- + + @pytest.mark.parametrize( + "op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub] + ) + @pytest.mark.parametrize( + "arr", + [ + np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]), + np.array([Timestamp("2021-11-09 09:54:00"), Timedelta("1D")]), + ], + ) + def test_td_op_timedelta_timedeltalike_array(self, op, arr): + msg = "unsupported operand type|cannot use operands with types" + with pytest.raises(TypeError, match=msg): + op(arr, Timedelta("1D")) + + +class TestTimedeltaComparison: + @pytest.mark.skip_ubsan + def test_compare_pytimedelta_bounds(self): + # GH#49021 don't overflow on comparison with very large pytimedeltas + + for unit in ["ns", "us"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax < timedelta.max + assert tdmax <= timedelta.max + assert not tdmax > timedelta.max + assert not tdmax >= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin > timedelta.min + assert tdmin >= timedelta.min + assert not tdmin < timedelta.min + assert not tdmin <= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + # But the "ms" and "s"-reso bounds extend past pytimedelta + for unit in ["ms", "s"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax > timedelta.max + assert tdmax >= timedelta.max + assert not tdmax < timedelta.max + assert not tdmax <= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin < timedelta.min + assert tdmin <= timedelta.min + assert not tdmin > timedelta.min + assert not tdmin >= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + def test_compare_pytimedelta_bounds2(self): + # a pytimedelta outside the microsecond bounds + pytd = timedelta(days=999999999, seconds=86399) + # NB: np.timedelta64(pytd, "s") incorrectly overflows + td64 = np.timedelta64(pytd.days, "D") + np.timedelta64(pytd.seconds, "s") + td = Timedelta(td64) + assert td.days == pytd.days + assert td.seconds == pytd.seconds + + assert td == pytd + assert not td != pytd + assert not td < pytd + assert not td > pytd + assert td <= pytd + assert td >= pytd + + td2 = td - Timedelta(seconds=1).as_unit("s") + assert td2 != pytd + assert not td2 == pytd + assert td2 < pytd + assert td2 <= pytd + assert not td2 > pytd + assert not td2 >= pytd + + def test_compare_tick(self, tick_classes): + cls = tick_classes + + off = cls(4) + td = off._as_pd_timedelta + assert isinstance(td, Timedelta) + + assert td == off + assert not td != off + assert td <= off + assert td >= off + assert not td < off + assert not td > off + + assert not td == 2 * off + assert td != 2 * off + assert td <= 2 * off + assert td < 2 * off + assert not td >= 2 * off + assert not td > 2 * off + + def test_comparison_object_array(self): + # analogous to GH#15183 + td = Timedelta("2 days") + other = Timedelta("3 hours") + + arr = np.array([other, td], dtype=object) + res = arr == td + expected = np.array([False, True], dtype=bool) + assert (res == expected).all() + + # 2D case + arr 
= np.array([[other, td], [td, other]], dtype=object) + res = arr != td + expected = np.array([[True, False], [False, True]], dtype=bool) + assert res.shape == expected.shape + assert (res == expected).all() + + def test_compare_timedelta_ndarray(self): + # GH#11835 + periods = [Timedelta("0 days 01:00:00"), Timedelta("0 days 01:00:00")] + arr = np.array(periods) + result = arr[0] > arr + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_td64_ndarray(self): + # GH#33441 + arr = np.arange(5).astype("timedelta64[ns]") + td = Timedelta(arr[1]) + + expected = np.array([False, True, False, False, False], dtype=bool) + + result = td == arr + tm.assert_numpy_array_equal(result, expected) + + result = arr == td + tm.assert_numpy_array_equal(result, expected) + + result = td != arr + tm.assert_numpy_array_equal(result, ~expected) + + result = arr != td + tm.assert_numpy_array_equal(result, ~expected) + + def test_compare_custom_object(self): + """ + Make sure unsupported operations on Timedelta return NotImplemented + and yield to the other operand (GH#20829). + """ + + class CustomClass: + def __init__(self, cmp_result=None) -> None: + self.cmp_result = cmp_result + + def generic_result(self): + if self.cmp_result is None: + return NotImplemented + else: + return self.cmp_result + + def __eq__(self, other): + return self.generic_result() + + def __gt__(self, other): + return self.generic_result() + + t = Timedelta("1s") + + assert t != "string" + assert t != 1 + assert t != CustomClass() + assert t != CustomClass(cmp_result=False) + + assert t < CustomClass(cmp_result=True) + assert not t < CustomClass(cmp_result=False) + + assert t == CustomClass(cmp_result=True) + + @pytest.mark.parametrize("val", ["string", 1]) + def test_compare_unknown_type(self, val): + # GH#20829 + t = Timedelta("1s") + msg = "not supported between instances of 'Timedelta' and '(int|str)'" + with pytest.raises(TypeError, match=msg): + t >= val + with pytest.raises(TypeError, match=msg): + t > val + with pytest.raises(TypeError, match=msg): + t <= val + with pytest.raises(TypeError, match=msg): + t < val + + +def test_ops_notimplemented(): + class Other: + pass + + other = Other() + + td = Timedelta("1 day") + assert td.__add__(other) is NotImplemented + assert td.__sub__(other) is NotImplemented + assert td.__truediv__(other) is NotImplemented + assert td.__mul__(other) is NotImplemented + assert td.__floordiv__(other) is NotImplemented + + +def test_ops_error_str(): + # GH#13624 + td = Timedelta("1 day") + + for left, right in [(td, "a"), ("a", td)]: + msg = "|".join( + [ + "unsupported operand type", + r'can only concatenate str \(not "Timedelta"\) to str', + "must be str, not Timedelta", + ] + ) + with pytest.raises(TypeError, match=msg): + left + right + + msg = "not supported between instances of" + with pytest.raises(TypeError, match=msg): + left > right + + assert not left == right # pylint: disable=unneeded-not + assert left != right diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..4663f8cb719616cadd6e946c987e76bc3d979b01 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py @@ -0,0 +1,698 @@ +from datetime import timedelta +from itertools import product + +import numpy as np +import pytest 
+ +from pandas._libs.tslibs import OutOfBoundsTimedelta +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit + +from pandas import ( + Index, + NaT, + Timedelta, + TimedeltaIndex, + offsets, + to_timedelta, +) +import pandas._testing as tm + + +class TestTimedeltaConstructorUnitKeyword: + @pytest.mark.parametrize("unit", ["Y", "y", "M"]) + def test_unit_m_y_raises(self, unit): + msg = "Units 'M', 'Y', and 'y' are no longer supported" + + with pytest.raises(ValueError, match=msg): + Timedelta(10, unit) + + with pytest.raises(ValueError, match=msg): + to_timedelta(10, unit) + + with pytest.raises(ValueError, match=msg): + to_timedelta([1, 2], unit) + + @pytest.mark.parametrize( + "unit,unit_depr", + [ + ("h", "H"), + ("min", "T"), + ("s", "S"), + ("ms", "L"), + ("ns", "N"), + ("us", "U"), + ], + ) + def test_units_H_T_S_L_N_U_deprecated(self, unit, unit_depr): + # GH#52536 + msg = f"'{unit_depr}' is deprecated and will be removed in a future version." + + expected = Timedelta(1, unit=unit) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = Timedelta(1, unit=unit_depr) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "unit, np_unit", + [(value, "W") for value in ["W", "w"]] + + [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]] + + [ + (value, "m") + for value in [ + "m", + "minute", + "min", + "minutes", + "Minute", + "Min", + "Minutes", + ] + ] + + [ + (value, "s") + for value in [ + "s", + "seconds", + "sec", + "second", + "Seconds", + "Sec", + "Second", + ] + ] + + [ + (value, "ms") + for value in [ + "ms", + "milliseconds", + "millisecond", + "milli", + "millis", + "MS", + "Milliseconds", + "Millisecond", + "Milli", + "Millis", + ] + ] + + [ + (value, "us") + for value in [ + "us", + "microseconds", + "microsecond", + "micro", + "micros", + "u", + "US", + "Microseconds", + "Microsecond", + "Micro", + "Micros", + "U", + ] + ] + + [ + (value, "ns") + for value in [ + "ns", + "nanoseconds", + "nanosecond", + "nano", + "nanos", + "n", + "NS", + "Nanoseconds", + "Nanosecond", + "Nano", + "Nanos", + "N", + ] + ], + ) + @pytest.mark.parametrize("wrapper", [np.array, list, Index]) + def test_unit_parser(self, unit, np_unit, wrapper): + # validate all units, GH 6855, GH 21762 + # array-likes + expected = TimedeltaIndex( + [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()], + dtype="m8[ns]", + ) + # TODO(2.0): the desired output dtype may have non-nano resolution + msg = f"'{unit}' is deprecated and will be removed in a future version." 
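+ # NOTE (editorial gloss, not part of the upstream test): two different + # deprecations are exercised below. For the single-letter aliases + # "u"/"U"/"n"/"N" the unit string itself is deprecated, so the message + # above is kept; for every other spelling only the ``unit`` keyword of + # TimedeltaIndex construction is deprecated, so ``msg`` is swapped in the + # ``else`` branch. Illustrative (hypothetical) calls: + # to_timedelta(range(5), unit="n") # expected FutureWarning: 'n' is deprecated + # TimedeltaIndex(range(5), unit="s") # expected FutureWarning: 'unit' keyword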
+ + if (unit, np_unit) in (("u", "us"), ("U", "us"), ("n", "ns"), ("N", "ns")): + warn = FutureWarning + else: + warn = FutureWarning + msg = "The 'unit' keyword in TimedeltaIndex construction is deprecated" + with tm.assert_produces_warning(warn, match=msg): + result = to_timedelta(wrapper(range(5)), unit=unit) + tm.assert_index_equal(result, expected) + result = TimedeltaIndex(wrapper(range(5)), unit=unit) + tm.assert_index_equal(result, expected) + + str_repr = [f"{x}{unit}" for x in np.arange(5)] + result = to_timedelta(wrapper(str_repr)) + tm.assert_index_equal(result, expected) + result = to_timedelta(wrapper(str_repr)) + tm.assert_index_equal(result, expected) + + # scalar + expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]")) + result = to_timedelta(2, unit=unit) + assert result == expected + result = Timedelta(2, unit=unit) + assert result == expected + + result = to_timedelta(f"2{unit}") + assert result == expected + result = Timedelta(f"2{unit}") + assert result == expected + + +def test_construct_from_kwargs_overflow(): + # GH#55503 + msg = "seconds=86400000000000000000, milliseconds=0, microseconds=0, nanoseconds=0" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(days=10**6) + msg = "seconds=60000000000000000000, milliseconds=0, microseconds=0, nanoseconds=0" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(minutes=10**9) + + +def test_construct_with_weeks_unit_overflow(): + # GH#47268 don't silently wrap around + with pytest.raises(OutOfBoundsTimedelta, match="without overflow"): + Timedelta(1000000000000000000, unit="W") + + with pytest.raises(OutOfBoundsTimedelta, match="without overflow"): + Timedelta(1000000000000000000.0, unit="W") + + +def test_construct_from_td64_with_unit(): + # ignore the unit, as it may cause silent overflows leading to incorrect + # results, and in non-overflow cases is irrelevant GH#46827 + obj = np.timedelta64(123456789000000000, "h") + + with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): + Timedelta(obj, unit="ps") + + with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): + Timedelta(obj, unit="ns") + + with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): + Timedelta(obj) + + +def test_from_td64_retain_resolution(): + # case where we retain millisecond resolution + obj = np.timedelta64(12345, "ms") + + td = Timedelta(obj) + assert td._value == obj.view("i8") + assert td._creso == NpyDatetimeUnit.NPY_FR_ms.value + + # Case where we cast to nearest-supported reso + obj2 = np.timedelta64(1234, "D") + td2 = Timedelta(obj2) + assert td2._creso == NpyDatetimeUnit.NPY_FR_s.value + assert td2 == obj2 + assert td2.days == 1234 + + # Case that _would_ overflow if we didn't support non-nano + obj3 = np.timedelta64(1000000000000000000, "us") + td3 = Timedelta(obj3) + assert td3.total_seconds() == 1000000000000 + assert td3._creso == NpyDatetimeUnit.NPY_FR_us.value + + +def test_from_pytimedelta_us_reso(): + # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that + td = timedelta(days=4, minutes=3) + result = Timedelta(td) + assert result.to_pytimedelta() == td + assert result._creso == NpyDatetimeUnit.NPY_FR_us.value + + +def test_from_tick_reso(): + tick = offsets.Nano() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ns.value + + tick = offsets.Micro() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_us.value + + tick = offsets.Milli() + assert Timedelta(tick)._creso == 
NpyDatetimeUnit.NPY_FR_ms.value + + tick = offsets.Second() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value + + # everything above Second gets cast to the closest supported reso: second + tick = offsets.Minute() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value + + tick = offsets.Hour() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value + + tick = offsets.Day() + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value + + +def test_construction(): + expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8") + assert Timedelta(10, unit="d")._value == expected + assert Timedelta(10.0, unit="d")._value == expected + assert Timedelta("10 days")._value == expected + assert Timedelta(days=10)._value == expected + assert Timedelta(days=10.0)._value == expected + + expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8") + assert Timedelta("10 days 00:00:10")._value == expected + assert Timedelta(days=10, seconds=10)._value == expected + assert Timedelta(days=10, milliseconds=10 * 1000)._value == expected + assert Timedelta(days=10, microseconds=10 * 1000 * 1000)._value == expected + + # rounding cases + assert Timedelta(82739999850000)._value == 82739999850000 + assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000)) + assert Timedelta(123072001000000)._value == 123072001000000 + assert "1 days 10:11:12.001" in str(Timedelta(123072001000000)) + + # string conversion with/without leading zero + # GH#9570 + assert Timedelta("0:00:00") == timedelta(hours=0) + assert Timedelta("00:00:00") == timedelta(hours=0) + assert Timedelta("-1:00:00") == -timedelta(hours=1) + assert Timedelta("-01:00:00") == -timedelta(hours=1) + + # more strings & abbrevs + # GH#8190 + assert Timedelta("1 h") == timedelta(hours=1) + assert Timedelta("1 hour") == timedelta(hours=1) + assert Timedelta("1 hr") == timedelta(hours=1) + assert Timedelta("1 hours") == timedelta(hours=1) + assert Timedelta("-1 hours") == -timedelta(hours=1) + assert Timedelta("1 m") == timedelta(minutes=1) + assert Timedelta("1.5 m") == timedelta(seconds=90) + assert Timedelta("1 minute") == timedelta(minutes=1) + assert Timedelta("1 minutes") == timedelta(minutes=1) + assert Timedelta("1 s") == timedelta(seconds=1) + assert Timedelta("1 second") == timedelta(seconds=1) + assert Timedelta("1 seconds") == timedelta(seconds=1) + assert Timedelta("1 ms") == timedelta(milliseconds=1) + assert Timedelta("1 milli") == timedelta(milliseconds=1) + assert Timedelta("1 millisecond") == timedelta(milliseconds=1) + assert Timedelta("1 us") == timedelta(microseconds=1) + assert Timedelta("1 µs") == timedelta(microseconds=1) + assert Timedelta("1 micros") == timedelta(microseconds=1) + assert Timedelta("1 microsecond") == timedelta(microseconds=1) + assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500") + assert Timedelta("1 ns") == Timedelta("00:00:00.000000001") + assert Timedelta("1 nano") == Timedelta("00:00:00.000000001") + assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001") + + # combos + assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1) + assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1) + assert Timedelta("10 days 1 h 1m 1s") == timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s 
3us") == -timedelta( + days=10, hours=1, minutes=1, seconds=1, microseconds=3 + ) + assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta( + days=10, hours=1, minutes=1, seconds=31, microseconds=3 + ) + + # Currently invalid as it has a - on the hh:mm:dd part + # (only allowed on the days) + msg = "only leading negative signs are allowed" + with pytest.raises(ValueError, match=msg): + Timedelta("-10 days -1 h 1.5m 1s 3us") + + # only leading neg signs are allowed + with pytest.raises(ValueError, match=msg): + Timedelta("10 days -1 h 1.5m 1s 3us") + + # no units specified + msg = "no units specified" + with pytest.raises(ValueError, match=msg): + Timedelta("3.1415") + + # invalid construction + msg = "cannot construct a Timedelta" + with pytest.raises(ValueError, match=msg): + Timedelta() + + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + Timedelta("foo") + + msg = ( + "cannot construct a Timedelta from " + "the passed arguments, allowed keywords are " + ) + with pytest.raises(ValueError, match=msg): + Timedelta(day=10) + + # floats + expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64( + 500, "ms" + ).astype("m8[ns]").view("i8") + assert Timedelta(10.5, unit="s")._value == expected + + # offset + assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2) + + # GH#11995: unicode + expected = Timedelta("1h") + result = Timedelta("1h") + assert result == expected + assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00") + + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + Timedelta("foo bar") + + +@pytest.mark.parametrize( + "item", + list( + { + "days": "D", + "seconds": "s", + "microseconds": "us", + "milliseconds": "ms", + "minutes": "m", + "hours": "h", + "weeks": "W", + }.items() + ), +) +@pytest.mark.parametrize( + "npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16] +) +def test_td_construction_with_np_dtypes(npdtype, item): + # GH#8757: test construction with np dtypes + pykwarg, npkwarg = item + expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8") + assert Timedelta(**{pykwarg: npdtype(1)})._value == expected + + +@pytest.mark.parametrize( + "val", + [ + "1s", + "-1s", + "1us", + "-1us", + "1 day", + "-1 day", + "-23:59:59.999999", + "-1 days +23:59:59.999999", + "-1ns", + "1ns", + "-23:59:59.999999999", + ], +) +def test_td_from_repr_roundtrip(val): + # round-trip both for string and value + td = Timedelta(val) + assert Timedelta(td._value) == td + + assert Timedelta(str(td)) == td + assert Timedelta(td._repr_base(format="all")) == td + assert Timedelta(td._repr_base()) == td + + +def test_overflow_on_construction(): + # GH#3374 + value = Timedelta("1day")._value * 20169940 + msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(value) + + # xref GH#17637 + msg = "Cannot cast 139993 from D to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(7 * 19999, unit="D") + + # used to overflow before non-ns support + td = Timedelta(timedelta(days=13 * 19999)) + assert td._creso == NpyDatetimeUnit.NPY_FR_us.value + assert td.days == 13 * 19999 + + +@pytest.mark.parametrize( + "val, unit", + [ + (15251, "W"), # 1 + (106752, "D"), # change from previous: + (2562048, "h"), # 0 hours + 
(153722868, "m"), # 13 minutes + (9223372037, "s"), # 44 seconds + ], +) +def test_construction_out_of_bounds_td64ns(val, unit): + # TODO: parametrize over units just above/below the implementation bounds + # once GH#38964 is resolved + + # Timedelta.max is just under 106752 days + td64 = np.timedelta64(val, unit) + assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong + + td = Timedelta(td64) + if unit != "M": + # with unit="M" the conversion to "s" is poorly defined + # (and numpy issues DeprecationWarning) + assert td.asm8 == td64 + assert td.asm8.dtype == "m8[s]" + msg = r"Cannot cast 1067\d\d days .* to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td.as_unit("ns") + + # But just back in bounds and we are OK + assert Timedelta(td64 - 1) == td64 - 1 + + td64 *= -1 + assert td64.astype("m8[ns]").view("i8") > 0 # i.e. naive astype will be wrong + + td2 = Timedelta(td64) + msg = r"Cannot cast -1067\d\d days .* to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td2.as_unit("ns") + + # But just back in bounds and we are OK + assert Timedelta(td64 + 1) == td64 + 1 + + +@pytest.mark.parametrize( + "val, unit", + [ + (15251 * 10**9, "W"), + (106752 * 10**9, "D"), + (2562048 * 10**9, "h"), + (153722868 * 10**9, "m"), + ], +) +def test_construction_out_of_bounds_td64s(val, unit): + td64 = np.timedelta64(val, unit) + with pytest.raises(OutOfBoundsTimedelta, match=str(td64)): + Timedelta(td64) + + # But just back in bounds and we are OK + assert Timedelta(td64 - 10**9) == td64 - 10**9 + + +@pytest.mark.parametrize( + "fmt,exp", + [ + ( + "P6DT0H50M3.010010012S", + Timedelta( + days=6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + ), + ( + "P-6DT0H50M3.010010012S", + Timedelta( + days=-6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + ), + ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)), + ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)), + ("P0DT0H0M0.00001S", Timedelta(microseconds=10)), + ("P0DT0H0M0.001S", Timedelta(milliseconds=1)), + ("P0DT0H1M0S", Timedelta(minutes=1)), + ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)), + ("PT1S", Timedelta(seconds=1)), + ("PT0S", Timedelta(seconds=0)), + ("P1WT0S", Timedelta(days=7, seconds=0)), + ("P1D", Timedelta(days=1)), + ("P1DT1H", Timedelta(days=1, hours=1)), + ("P1W", Timedelta(days=7)), + ("PT300S", Timedelta(seconds=300)), + ("P1DT0H0M00000000000S", Timedelta(days=1)), + ("PT-6H3M", Timedelta(hours=-6, minutes=3)), + ("-PT6H3M", Timedelta(hours=-6, minutes=-3)), + ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)), + ], +) +def test_iso_constructor(fmt, exp): + assert Timedelta(fmt) == exp + + +@pytest.mark.parametrize( + "fmt", + [ + "PPPPPPPPPPPP", + "PDTHMS", + "P0DT999H999M999S", + "P1DT0H0M0.0000000000000S", + "P1DT0H0M0.S", + "P", + "-P", + ], +) +def test_iso_constructor_raises(fmt): + msg = f"Invalid ISO 8601 Duration format - {fmt}" + with pytest.raises(ValueError, match=msg): + Timedelta(fmt) + + +@pytest.mark.parametrize( + "constructed_td, conversion", + [ + (Timedelta(nanoseconds=100), "100ns"), + ( + Timedelta( + days=1, + hours=1, + minutes=1, + weeks=1, + seconds=1, + milliseconds=1, + microseconds=1, + nanoseconds=1, + ), + 694861001001001, + ), + (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"), + (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"), + 
(Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"), + ], +) +def test_td_constructor_on_nanoseconds(constructed_td, conversion): + # GH#9273 + assert constructed_td == Timedelta(conversion) + + +def test_td_constructor_value_error(): + msg = "Invalid type . Must be int or float." + with pytest.raises(TypeError, match=msg): + Timedelta(nanoseconds="abc") + + +def test_timedelta_constructor_identity(): + # Test for #30543 + expected = Timedelta(np.timedelta64(1, "s")) + result = Timedelta(expected) + assert result is expected + + +def test_timedelta_pass_td_and_kwargs_raises(): + # don't silently ignore the kwargs GH#48898 + td = Timedelta(days=1) + msg = ( + "Cannot pass both a Timedelta input and timedelta keyword arguments, " + r"got \['days'\]" + ) + with pytest.raises(ValueError, match=msg): + Timedelta(td, days=2) + + +@pytest.mark.parametrize( + "constructor, value, unit, expectation", + [ + (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + (to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")), + ], +) +def test_string_with_unit(constructor, value, unit, expectation): + exp, match = expectation + with pytest.raises(exp, match=match): + _ = constructor(value, unit=unit) + + +@pytest.mark.parametrize( + "value", + [ + "".join(elements) + for repetition in (1, 2) + for elements in product("+-, ", repeat=repetition) + ], +) +def test_string_without_numbers(value): + # GH39710 Timedelta input string with only symbols and no digits raises an error + msg = ( + "symbols w/o a number" + if value != "--" + else "only leading negative signs are allowed" + ) + with pytest.raises(ValueError, match=msg): + Timedelta(value) + + +def test_timedelta_new_npnat(): + # GH#48898 + nat = np.timedelta64("NaT", "h") + assert Timedelta(nat) is NaT + + +def test_subclass_respected(): + # GH#49579 + class MyCustomTimedelta(Timedelta): + pass + + td = MyCustomTimedelta("1 minute") + assert isinstance(td, MyCustomTimedelta) + + +def test_non_nano_value(): + # https://github.com/pandas-dev/pandas/issues/49076 + result = Timedelta(10, unit="D").as_unit("s").value + # `.value` shows nanoseconds, even though unit is 's' + assert result == 864000000000000 + + # out-of-nanoseconds-bounds `.value` raises informative message + msg = ( + r"Cannot convert Timedelta to nanoseconds without overflow. 
" + r"Use `.asm8.view\('i8'\)` to cast represent Timedelta in its " + r"own unit \(here, s\).$" + ) + td = Timedelta(1_000, "D").as_unit("s") * 1_000 + with pytest.raises(OverflowError, match=msg): + td.value + # check that the suggested workaround actually works + result = td.asm8.view("i8") + assert result == 86400000000 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..e1b0076d5b7b99f4baba3f18ccf84d8054a31087 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py @@ -0,0 +1,109 @@ +import pytest + +from pandas import Timedelta + + +@pytest.mark.parametrize( + "td, expected_repr", + [ + (Timedelta(10, unit="d"), "Timedelta('10 days 00:00:00')"), + (Timedelta(10, unit="s"), "Timedelta('0 days 00:00:10')"), + (Timedelta(10, unit="ms"), "Timedelta('0 days 00:00:00.010000')"), + (Timedelta(-10, unit="ms"), "Timedelta('-1 days +23:59:59.990000')"), + ], +) +def test_repr(td, expected_repr): + assert repr(td) == expected_repr + + +@pytest.mark.parametrize( + "td, expected_iso", + [ + ( + Timedelta( + days=6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + "P6DT0H50M3.010010012S", + ), + (Timedelta(days=4, hours=12, minutes=30, seconds=5), "P4DT12H30M5S"), + (Timedelta(nanoseconds=123), "P0DT0H0M0.000000123S"), + # trim nano + (Timedelta(microseconds=10), "P0DT0H0M0.00001S"), + # trim micro + (Timedelta(milliseconds=1), "P0DT0H0M0.001S"), + # don't strip every 0 + (Timedelta(minutes=1), "P0DT0H1M0S"), + ], +) +def test_isoformat(td, expected_iso): + assert td.isoformat() == expected_iso + + +class TestReprBase: + def test_none(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = lambda x: x._repr_base() + assert drepr(delta_1d) == "1 days" + assert drepr(-delta_1d) == "-1 days" + assert drepr(delta_0d) == "0 days" + assert drepr(delta_1s) == "0 days 00:00:01" + assert drepr(delta_500ms) == "0 days 00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_sub_day(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = lambda x: x._repr_base(format="sub_day") + assert drepr(delta_1d) == "1 days" + assert drepr(-delta_1d) == "-1 days" + assert drepr(delta_0d) == "00:00:00" + assert drepr(delta_1s) == "00:00:01" + assert drepr(delta_500ms) == "00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_long(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = lambda x: x._repr_base(format="long") + assert drepr(delta_1d) == "1 days 00:00:00" + assert drepr(-delta_1d) == "-1 days +00:00:00" + assert drepr(delta_0d) == "0 
days 00:00:00" + assert drepr(delta_1s) == "0 days 00:00:01" + assert drepr(delta_500ms) == "0 days 00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_all(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1ns = Timedelta(1, unit="ns") + + drepr = lambda x: x._repr_base(format="all") + assert drepr(delta_1d) == "1 days 00:00:00.000000000" + assert drepr(-delta_1d) == "-1 days +00:00:00.000000000" + assert drepr(delta_0d) == "0 days 00:00:00.000000000" + assert drepr(delta_1ns) == "0 days 00:00:00.000000001" + assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..d4398f66e6f890a26dc90d540766d71d7857ad67 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py @@ -0,0 +1,666 @@ +""" test the scalar Timedelta """ +from datetime import timedelta +import sys + +from hypothesis import ( + given, + strategies as st, +) +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import ( + NaT, + iNaT, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsTimedelta + +from pandas import ( + Timedelta, + to_timedelta, +) +import pandas._testing as tm + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit_str(self, request): + return request.param + + @pytest.fixture + def unit(self, unit_str): + # 7, 8, 9 correspond to second, millisecond, and microsecond, respectively + attr = f"NPY_FR_{unit_str}" + return getattr(NpyDatetimeUnit, attr).value + + @pytest.fixture + def val(self, unit): + # microsecond that would be just out of bounds for nano + us = 9223372800000000 + if unit == NpyDatetimeUnit.NPY_FR_us.value: + value = us + elif unit == NpyDatetimeUnit.NPY_FR_ms.value: + value = us // 1000 + else: + value = us // 1_000_000 + return value + + @pytest.fixture + def td(self, unit, val): + return Timedelta._from_value_and_reso(val, unit) + + def test_from_value_and_reso(self, unit, val): + # Just checking that the fixture is giving us what we asked for + td = Timedelta._from_value_and_reso(val, unit) + assert td._value == val + assert td._creso == unit + assert td.days == 106752 + + def test_unary_non_nano(self, td, unit): + assert abs(td)._creso == unit + assert (-td)._creso == unit + assert (+td)._creso == unit + + def test_sub_preserves_reso(self, td, unit): + res = td - td + expected = Timedelta._from_value_and_reso(0, unit) + assert res == expected + assert res._creso == unit + + def test_mul_preserves_reso(self, td, unit): + # The td fixture should always be far from the implementation + # bound, so doubling does not risk overflow. 
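+ # Worked example (editorial note, not in the upstream file): with unit="s" + # the fixture value is 9223372800000000 us // 1_000_000 == 9_223_372_800, + # and 2 * 9_223_372_800 ~= 1.8e10 is far below the int64 bound of ~9.2e18, + # so the multiplication below cannot overflow at any tested resolution.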
+ res = td * 2 + assert res._value == td._value * 2 + assert res._creso == unit + + def test_cmp_cross_reso(self, td): + # numpy gets this wrong because of silent overflow + other = Timedelta(days=106751, unit="ns") + assert other < td + assert td > other + assert not other == td + assert td != other + + def test_to_pytimedelta(self, td): + res = td.to_pytimedelta() + expected = timedelta(days=106752) + assert type(res) is timedelta + assert res == expected + + def test_to_timedelta64(self, td, unit): + for res in [td.to_timedelta64(), td.to_numpy(), td.asm8]: + assert isinstance(res, np.timedelta64) + assert res.view("i8") == td._value + if unit == NpyDatetimeUnit.NPY_FR_s.value: + assert res.dtype == "m8[s]" + elif unit == NpyDatetimeUnit.NPY_FR_ms.value: + assert res.dtype == "m8[ms]" + elif unit == NpyDatetimeUnit.NPY_FR_us.value: + assert res.dtype == "m8[us]" + + def test_truediv_timedeltalike(self, td): + assert td / td == 1 + assert (2.5 * td) / td == 2.5 + + other = Timedelta(td._value) + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow." + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td / other + + # Timedelta(other.to_pytimedelta()) has microsecond resolution, + # so the division doesn't require casting all the way to nanos, + # so it succeeds + res = other.to_pytimedelta() / td + expected = other.to_pytimedelta() / td.to_pytimedelta() + assert res == expected + + # if there's no overflow, we cast to the higher reso + left = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_us.value) + right = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_ms.value) + result = left / right + assert result == 0.001 + + result = right / left + assert result == 1000 + + def test_truediv_numeric(self, td): + assert td / np.nan is NaT + + res = td / 2 + assert res._value == td._value / 2 + assert res._creso == td._creso + + res = td / 2.0 + assert res._value == td._value / 2 + assert res._creso == td._creso + + def test_floordiv_timedeltalike(self, td): + assert td // td == 1 + assert (2.5 * td) // td == 2 + + other = Timedelta(td._value) + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td // other + + # Timedelta(other.to_pytimedelta()) has microsecond resolution, + # so the floordiv doesn't require casting all the way to nanos, + # so it succeeds + res = other.to_pytimedelta() // td + assert res == 0 + + # if there's no overflow, we cast to the higher reso + left = Timedelta._from_value_and_reso(50050, NpyDatetimeUnit.NPY_FR_us.value) + right = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_ms.value) + result = left // right + assert result == 1 + result = right // left + assert result == 0 + + def test_floordiv_numeric(self, td): + assert td // np.nan is NaT + + res = td // 2 + assert res._value == td._value // 2 + assert res._creso == td._creso + + res = td // 2.0 + assert res._value == td._value // 2 + assert res._creso == td._creso + + assert td // np.array(np.nan) is NaT + + res = td // np.array(2) + assert res._value == td._value // 2 + assert res._creso == td._creso + + res = td // np.array(2.0) + assert res._value == td._value // 2 + assert res._creso == td._creso + + def test_addsub_mismatched_reso(self, td): + # need to cast since td is out of bounds for ns; otherwise + # we would raise OverflowError without casting + other = Timedelta(days=1).as_unit("us") + + # td is out of bounds for ns + result = td + other + assert result._creso == other._creso + 
assert result.days == td.days + 1 + + result = other + td + assert result._creso == other._creso + assert result.days == td.days + 1 + + result = td - other + assert result._creso == other._creso + assert result.days == td.days - 1 + + result = other - td + assert result._creso == other._creso + assert result.days == 1 - td.days + + other2 = Timedelta(500) + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td + other2 + with pytest.raises(OutOfBoundsTimedelta, match=msg): + other2 + td + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td - other2 + with pytest.raises(OutOfBoundsTimedelta, match=msg): + other2 - td + + def test_min(self, td): + assert td.min <= td + assert td.min._creso == td._creso + assert td.min._value == NaT._value + 1 + + def test_max(self, td): + assert td.max >= td + assert td.max._creso == td._creso + assert td.max._value == np.iinfo(np.int64).max + + def test_resolution(self, td): + expected = Timedelta._from_value_and_reso(1, td._creso) + result = td.resolution + assert result == expected + assert result._creso == expected._creso + + def test_hash(self) -> None: + # GH#54037 + second_resolution_max = Timedelta(0).as_unit("s").max + + assert hash(second_resolution_max) + + +def test_timedelta_class_min_max_resolution(): + # when accessed on the class (as opposed to an instance), we default + # to nanoseconds + assert Timedelta.min == Timedelta(NaT._value + 1) + assert Timedelta.min._creso == NpyDatetimeUnit.NPY_FR_ns.value + + assert Timedelta.max == Timedelta(np.iinfo(np.int64).max) + assert Timedelta.max._creso == NpyDatetimeUnit.NPY_FR_ns.value + + assert Timedelta.resolution == Timedelta(1) + assert Timedelta.resolution._creso == NpyDatetimeUnit.NPY_FR_ns.value + + +class TestTimedeltaUnaryOps: + def test_invert(self): + td = Timedelta(10, unit="d") + + msg = "bad operand type for unary ~" + with pytest.raises(TypeError, match=msg): + ~td + + # check this matches pytimedelta and timedelta64 + with pytest.raises(TypeError, match=msg): + ~(td.to_pytimedelta()) + + umsg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=umsg): + ~(td.to_timedelta64()) + + def test_unary_ops(self): + td = Timedelta(10, unit="d") + + # __neg__, __pos__ + assert -td == Timedelta(-10, unit="d") + assert -td == Timedelta("-10d") + assert +td == Timedelta(10, unit="d") + + # __abs__, __abs__(__neg__) + assert abs(td) == td + assert abs(-td) == td + assert abs(-td) == Timedelta("10d") + + +class TestTimedeltas: + @pytest.mark.parametrize( + "unit, value, expected", + [ + ("us", 9.999, 9999), + ("ms", 9.999999, 9999999), + ("s", 9.999999999, 9999999999), + ], + ) + def test_rounding_on_int_unit_construction(self, unit, value, expected): + # GH 12690 + result = Timedelta(value, unit=unit) + assert result._value == expected + result = Timedelta(str(value) + unit) + assert result._value == expected + + def test_total_seconds_scalar(self): + # see gh-10939 + rng = Timedelta("1 days, 10:11:12.100123456") + expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9 + tm.assert_almost_equal(rng.total_seconds(), expt) + + rng = Timedelta(np.nan) + assert np.isnan(rng.total_seconds()) + + def test_conversion(self): + for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]: + pydt = td.to_pytimedelta() + assert td == Timedelta(pydt) + assert td == pydt + assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta) + + assert td == 
np.timedelta64(td._value, "ns") + td64 = td.to_timedelta64() + + assert td64 == np.timedelta64(td._value, "ns") + assert td == td64 + + assert isinstance(td64, np.timedelta64) + + # this is NOT equal and cannot be roundtripped (because of the nanos) + td = Timedelta("1 days, 10:11:12.012345678") + assert td != td.to_pytimedelta() + + def test_fields(self): + def check(value): + # that we are int + assert isinstance(value, int) + + # compat to datetime.timedelta + rng = to_timedelta("1 days, 10:11:12") + assert rng.days == 1 + assert rng.seconds == 10 * 3600 + 11 * 60 + 12 + assert rng.microseconds == 0 + assert rng.nanoseconds == 0 + + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format("hours")): + rng.hours + with pytest.raises(AttributeError, match=msg.format("minutes")): + rng.minutes + with pytest.raises(AttributeError, match=msg.format("milliseconds")): + rng.milliseconds + + # GH 10050 + check(rng.days) + check(rng.seconds) + check(rng.microseconds) + check(rng.nanoseconds) + + td = Timedelta("-1 days, 10:11:12") + assert abs(td) == Timedelta("13:48:48") + assert str(td) == "-1 days +10:11:12" + assert -td == Timedelta("0 days 13:48:48") + assert -Timedelta("-1 days, 10:11:12")._value == 49728000000000 + assert Timedelta("-1 days, 10:11:12")._value == -49728000000000 + + rng = to_timedelta("-1 days, 10:11:12.100123456") + assert rng.days == -1 + assert rng.seconds == 10 * 3600 + 11 * 60 + 12 + assert rng.microseconds == 100 * 1000 + 123 + assert rng.nanoseconds == 456 + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format("hours")): + rng.hours + with pytest.raises(AttributeError, match=msg.format("minutes")): + rng.minutes + with pytest.raises(AttributeError, match=msg.format("milliseconds")): + rng.milliseconds + + # components + tup = to_timedelta(-1, "us").components + assert tup.days == -1 + assert tup.hours == 23 + assert tup.minutes == 59 + assert tup.seconds == 59 + assert tup.milliseconds == 999 + assert tup.microseconds == 999 + assert tup.nanoseconds == 0 + + # GH 10050 + check(tup.days) + check(tup.hours) + check(tup.minutes) + check(tup.seconds) + check(tup.milliseconds) + check(tup.microseconds) + check(tup.nanoseconds) + + tup = Timedelta("-1 days 1 us").components + assert tup.days == -2 + assert tup.hours == 23 + assert tup.minutes == 59 + assert tup.seconds == 59 + assert tup.milliseconds == 999 + assert tup.microseconds == 999 + assert tup.nanoseconds == 0 + + # TODO: this is a test of to_timedelta string parsing + def test_iso_conversion(self): + # GH #21877 + expected = Timedelta(1, unit="s") + assert to_timedelta("P0DT0H0M1S") == expected + + # TODO: this is a test of to_timedelta returning NaT + def test_nat_converters(self): + result = to_timedelta("nat").to_numpy() + assert result.dtype.kind == "M" + assert result.astype("int64") == iNaT + + result = to_timedelta("nan").to_numpy() + assert result.dtype.kind == "M" + assert result.astype("int64") == iNaT + + def test_numeric_conversions(self): + assert Timedelta(0) == np.timedelta64(0, "ns") + assert Timedelta(10) == np.timedelta64(10, "ns") + assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns") + + assert Timedelta(10, unit="us") == np.timedelta64(10, "us") + assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms") + assert Timedelta(10, unit="s") == np.timedelta64(10, "s") + assert Timedelta(10, unit="d") == np.timedelta64(10, "D") + + def test_timedelta_conversions(self): + assert 
Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype( + "m8[ns]" + ) + assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype( + "m8[ns]" + ) + assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]") + + def test_to_numpy_alias(self): + # GH 24653: alias .to_numpy() for scalars + td = Timedelta("10m7s") + assert td.to_timedelta64() == td.to_numpy() + + # GH#44460 + msg = "dtype and copy arguments are ignored" + with pytest.raises(ValueError, match=msg): + td.to_numpy("m8[s]") + with pytest.raises(ValueError, match=msg): + td.to_numpy(copy=True) + + def test_identity(self): + td = Timedelta(10, unit="d") + assert isinstance(td, Timedelta) + assert isinstance(td, timedelta) + + def test_short_format_converters(self): + def conv(v): + return v.astype("m8[ns]") + + assert Timedelta("10") == np.timedelta64(10, "ns") + assert Timedelta("10ns") == np.timedelta64(10, "ns") + assert Timedelta("100") == np.timedelta64(100, "ns") + assert Timedelta("100ns") == np.timedelta64(100, "ns") + + assert Timedelta("1000") == np.timedelta64(1000, "ns") + assert Timedelta("1000ns") == np.timedelta64(1000, "ns") + assert Timedelta("1000NS") == np.timedelta64(1000, "ns") + + assert Timedelta("10us") == np.timedelta64(10000, "ns") + assert Timedelta("100us") == np.timedelta64(100000, "ns") + assert Timedelta("1000us") == np.timedelta64(1000000, "ns") + assert Timedelta("1000Us") == np.timedelta64(1000000, "ns") + assert Timedelta("1000uS") == np.timedelta64(1000000, "ns") + + assert Timedelta("1ms") == np.timedelta64(1000000, "ns") + assert Timedelta("10ms") == np.timedelta64(10000000, "ns") + assert Timedelta("100ms") == np.timedelta64(100000000, "ns") + assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns") + + assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns") + assert Timedelta("1s") == np.timedelta64(1000000000, "ns") + assert Timedelta("10s") == np.timedelta64(10000000000, "ns") + assert Timedelta("100s") == np.timedelta64(100000000000, "ns") + assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns") + + assert Timedelta("1d") == conv(np.timedelta64(1, "D")) + assert Timedelta("-1d") == -conv(np.timedelta64(1, "D")) + assert Timedelta("1D") == conv(np.timedelta64(1, "D")) + assert Timedelta("10D") == conv(np.timedelta64(10, "D")) + assert Timedelta("100D") == conv(np.timedelta64(100, "D")) + assert Timedelta("1000D") == conv(np.timedelta64(1000, "D")) + assert Timedelta("10000D") == conv(np.timedelta64(10000, "D")) + + # space + assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D")) + assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D")) + + # invalid + msg = "invalid unit abbreviation" + with pytest.raises(ValueError, match=msg): + Timedelta("1foo") + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + Timedelta("foo") + + def test_full_format_converters(self): + def conv(v): + return v.astype("m8[ns]") + + d1 = np.timedelta64(1, "D") + + assert Timedelta("1days") == conv(d1) + assert Timedelta("1days,") == conv(d1) + assert Timedelta("- 1days,") == -conv(d1) + + assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s")) + assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s")) + assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s")) + assert Timedelta("06:00:01.01") == conv( + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms") + ) + + assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s")) + assert 
Timedelta("1days, 06:00:01") == conv( + d1 + np.timedelta64(6 * 3600 + 1, "s") + ) + assert Timedelta("1days, 06:00:01.01") == conv( + d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms") + ) + + # invalid + msg = "have leftover units" + with pytest.raises(ValueError, match=msg): + Timedelta("- 1days, 00") + + def test_pickle(self): + v = Timedelta("1 days 10:11:12.0123456") + v_p = tm.round_trip_pickle(v) + assert v == v_p + + def test_timedelta_hash_equality(self): + # GH 11129 + v = Timedelta(1, "D") + td = timedelta(days=1) + assert hash(v) == hash(td) + + d = {td: 2} + assert d[v] == 2 + + tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)] + assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) + + # python timedeltas drop ns resolution + ns_td = Timedelta(1, "ns") + assert hash(ns_td) != hash(ns_td.to_pytimedelta()) + + @pytest.mark.skip_ubsan + @pytest.mark.xfail( + reason="pd.Timedelta violates the Python hash invariant (GH#44504).", + ) + @given( + st.integers( + min_value=(-sys.maxsize - 1) // 500, + max_value=sys.maxsize // 500, + ) + ) + def test_hash_equality_invariance(self, half_microseconds: int) -> None: + # GH#44504 + + nanoseconds = half_microseconds * 500 + + pandas_timedelta = Timedelta(nanoseconds) + numpy_timedelta = np.timedelta64(nanoseconds) + + # See: https://docs.python.org/3/glossary.html#term-hashable + # Hashable objects which compare equal must have the same hash value. + assert pandas_timedelta != numpy_timedelta or hash(pandas_timedelta) == hash( + numpy_timedelta + ) + + def test_implementation_limits(self): + min_td = Timedelta(Timedelta.min) + max_td = Timedelta(Timedelta.max) + + # GH 12727 + # timedelta limits correspond to int64 boundaries + assert min_td._value == iNaT + 1 + assert max_td._value == lib.i8max + + # Beyond lower limit, a NAT before the Overflow + assert (min_td - Timedelta(1, "ns")) is NaT + + msg = "int too (large|big) to convert" + with pytest.raises(OverflowError, match=msg): + min_td - Timedelta(2, "ns") + + with pytest.raises(OverflowError, match=msg): + max_td + Timedelta(1, "ns") + + # Same tests using the internal nanosecond values + td = Timedelta(min_td._value - 1, "ns") + assert td is NaT + + msg = "Cannot cast -9223372036854775809 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(min_td._value - 2, "ns") + + msg = "Cannot cast 9223372036854775808 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(max_td._value + 1, "ns") + + def test_total_seconds_precision(self): + # GH 19458 + assert Timedelta("30s").total_seconds() == 30.0 + assert Timedelta("0").total_seconds() == 0.0 + assert Timedelta("-2s").total_seconds() == -2.0 + assert Timedelta("5.324s").total_seconds() == 5.324 + assert (Timedelta("30s").total_seconds() - 30.0) < 1e-20 + assert (30.0 - Timedelta("30s").total_seconds()) < 1e-20 + + def test_resolution_string(self): + assert Timedelta(days=1).resolution_string == "D" + assert Timedelta(days=1, hours=6).resolution_string == "h" + assert Timedelta(days=1, minutes=6).resolution_string == "min" + assert Timedelta(days=1, seconds=6).resolution_string == "s" + assert Timedelta(days=1, milliseconds=6).resolution_string == "ms" + assert Timedelta(days=1, microseconds=6).resolution_string == "us" + assert Timedelta(days=1, nanoseconds=6).resolution_string == "ns" + + def test_resolution_deprecated(self): + # GH#21344 + td = Timedelta(days=4, hours=3) + result = td.resolution + assert result == 
Timedelta(nanoseconds=1) + + # Check that the attribute is available on the class, mirroring + # the stdlib timedelta behavior + result = Timedelta.resolution + assert result == Timedelta(nanoseconds=1) + + +@pytest.mark.parametrize( + "value, expected", + [ + (Timedelta("10s"), True), + (Timedelta("-10s"), True), + (Timedelta(10, unit="ns"), True), + (Timedelta(0, unit="ns"), False), + (Timedelta(-10, unit="ns"), True), + (Timedelta(None), True), + (NaT, True), + ], +) +def test_truthiness(value, expected): + # https://github.com/pandas-dev/pandas/issues/21484 + assert bool(value) is expected + + +def test_timedelta_attribute_precision(): + # GH 31354 + td = Timedelta(1552211999999999872, unit="ns") + result = td.days * 86400 + result += td.seconds + result *= 1000000 + result += td.microseconds + result *= 1000 + result += td.nanoseconds + expected = td._value + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..134b32a44342cfb2b9b5baebededf4de685a9d63 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62b7b6fc383fed83321d669ba52d6685d3bf8ed6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddb6ba53beed427da70800ae5827873378adbc1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4771de6252bbf40d2146e652c804acfec6a732f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_as_unit.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_as_unit.py new file mode 100644 index 0000000000000000000000000000000000000000..5572c5936fb12718a59dcc3aca2ac1206bcfb965 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_as_unit.py @@ -0,0 +1,86 @@ +import pytest + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsDatetime + +from pandas import Timestamp + + +class TestTimestampAsUnit: + def test_as_unit(self): + ts = Timestamp("1970-01-01").as_unit("ns") + assert ts.unit == "ns" + + assert ts.as_unit("ns") is ts + + res = ts.as_unit("us") + assert res._value == ts._value // 1000 + assert res._creso == NpyDatetimeUnit.NPY_FR_us.value + + rt = res.as_unit("ns") + assert rt._value == ts._value + assert rt._creso == ts._creso + + res = ts.as_unit("ms") + assert res._value == ts._value // 1_000_000 + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + + rt = res.as_unit("ns") + assert rt._value == ts._value + assert rt._creso == ts._creso + + res = ts.as_unit("s") + assert res._value == ts._value // 1_000_000_000 + assert res._creso == NpyDatetimeUnit.NPY_FR_s.value + + rt = res.as_unit("ns") + assert rt._value == ts._value + assert rt._creso == ts._creso + + def test_as_unit_overflows(self): + # microsecond that would be just out of bounds for nano + us = 9223372800000000 + ts = Timestamp._from_value_and_reso(us, NpyDatetimeUnit.NPY_FR_us.value, None) + + msg = "Cannot cast 2262-04-12 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsDatetime, match=msg): + ts.as_unit("ns") + + res = ts.as_unit("ms") + assert res._value == us // 1000 + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + + def test_as_unit_rounding(self): + ts = Timestamp(1_500_000) # i.e. 1500 microseconds + res = ts.as_unit("ms") + + expected = Timestamp(1_000_000) # i.e. 1 millisecond + assert res == expected + + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._value == 1 + + with pytest.raises(ValueError, match="Cannot losslessly convert units"): + ts.as_unit("ms", round_ok=False) + + def test_as_unit_non_nano(self): + # case where we are going neither to nor from nano + ts = Timestamp("1970-01-02").as_unit("ms") + assert ts.year == 1970 + assert ts.month == 1 + assert ts.day == 2 + assert ts.hour == ts.minute == ts.second == ts.microsecond == ts.nanosecond == 0 + + res = ts.as_unit("s") + assert res._value == 24 * 3600 + assert res.year == 1970 + assert res.month == 1 + assert res.day == 2 + assert ( + res.hour + == res.minute + == res.second + == res.microsecond + == res.nanosecond + == 0 + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_normalize.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..e097c9673e17a6058cac3d71cb8acb650db33d16 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_normalize.py @@ -0,0 +1,22 @@ +import pytest + +from pandas._libs.tslibs import Timestamp +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit + + +class TestTimestampNormalize: + @pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_normalize(self, tz_naive_fixture, arg, unit): + tz = tz_naive_fixture + ts = Timestamp(arg, tz=tz).as_unit(unit) + result = ts.normalize() + expected = Timestamp("2013-11-30", tz=tz) + assert result == expected + assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value + + def test_normalize_pre_epoch_dates(self): + # GH: 36294 
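+ # (editorial note, not in the upstream file) 1969-01-01 precedes the Unix + # epoch, so the underlying integer value is negative; normalize() must + # floor to midnight rather than truncate toward zero, since truncation + # would land on midnight of the *following* day for pre-epoch values.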
+ result = Timestamp("1969-01-01 09:00:00").normalize() + expected = Timestamp("1969-01-01 00:00:00") + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_convert.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..a7bb3b90805ab4a7b5d2a0ec08b795a2118daf36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/methods/test_tz_convert.py @@ -0,0 +1,51 @@ +import dateutil +import pytest + +from pandas._libs.tslibs import timezones +import pandas.util._test_decorators as td + +from pandas import Timestamp + + +class TestTimestampTZConvert: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_astimezone(self, tzstr): + # astimezone is an alias for tz_convert, so keep it with + # the tz_convert tests + utcdate = Timestamp("3/11/2012 22:00", tz="UTC") + expected = utcdate.tz_convert(tzstr) + result = utcdate.astimezone(tzstr) + assert expected == result + assert isinstance(result, Timestamp) + + @pytest.mark.parametrize( + "stamp", + [ + "2014-02-01 09:00", + "2014-07-08 09:00", + "2014-11-01 17:00", + "2014-11-05 00:00", + ], + ) + def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture): + tz = tz_aware_fixture + + ts = Timestamp(stamp, tz="UTC") + converted = ts.tz_convert(tz) + + reset = converted.tz_convert(None) + assert reset == Timestamp(stamp) + assert reset.tzinfo is None + assert reset == converted.tz_convert("UTC").tz_localize(None) + + @td.skip_if_windows + def test_tz_convert_utc_with_system_utc(self): + # from system utc to real utc + ts = Timestamp("2001-01-05 11:56", tz=timezones.maybe_get_tz("dateutil/UTC")) + # check that the time hasn't changed. + assert ts == ts.tz_convert(dateutil.tz.tzutc()) + + # from system utc to real utc + ts = Timestamp("2001-01-05 11:56", tz=timezones.maybe_get_tz("dateutil/UTC")) + # check that the time hasn't changed. 
+ assert ts == ts.tz_convert(dateutil.tz.tzutc()) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_arithmetic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..2d58513989a66a84791ed3b9169bd4f2c9b87c20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -0,0 +1,334 @@ +from datetime import ( + datetime, + timedelta, + timezone, +) + +from dateutil.tz import gettz +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + Timedelta, + Timestamp, + offsets, + to_offset, +) + +import pandas._testing as tm + + +class TestTimestampArithmetic: + def test_overflow_offset(self): + # no overflow expected + + stamp = Timestamp("2000/1/1") + offset_no_overflow = to_offset("D") * 100 + + expected = Timestamp("2000/04/10") + assert stamp + offset_no_overflow == expected + + assert offset_no_overflow + stamp == expected + + expected = Timestamp("1999/09/23") + assert stamp - offset_no_overflow == expected + + def test_overflow_offset_raises(self): + # xref https://github.com/statsmodels/statsmodels/issues/3374 + # ends up multiplying really large numbers which overflow + + stamp = Timestamp("2017-01-13 00:00:00").as_unit("ns") + offset_overflow = 20169940 * offsets.Day(1) + lmsg2 = r"Cannot cast -?20169940 days \+?00:00:00 to unit='ns' without overflow" + + with pytest.raises(OutOfBoundsTimedelta, match=lmsg2): + stamp + offset_overflow + + with pytest.raises(OutOfBoundsTimedelta, match=lmsg2): + offset_overflow + stamp + + with pytest.raises(OutOfBoundsTimedelta, match=lmsg2): + stamp - offset_overflow + + # xref https://github.com/pandas-dev/pandas/issues/14080 + # used to crash, so check for proper overflow exception + + stamp = Timestamp("2000/1/1").as_unit("ns") + offset_overflow = to_offset("D") * 100**5 + + lmsg3 = ( + r"Cannot cast -?10000000000 days \+?00:00:00 to unit='ns' without overflow" + ) + with pytest.raises(OutOfBoundsTimedelta, match=lmsg3): + stamp + offset_overflow + + with pytest.raises(OutOfBoundsTimedelta, match=lmsg3): + offset_overflow + stamp + + with pytest.raises(OutOfBoundsTimedelta, match=lmsg3): + stamp - offset_overflow + + def test_overflow_timestamp_raises(self): + # https://github.com/pandas-dev/pandas/issues/31774 + msg = "Result is too large" + a = Timestamp("2101-01-01 00:00:00").as_unit("ns") + b = Timestamp("1688-01-01 00:00:00").as_unit("ns") + + with pytest.raises(OutOfBoundsDatetime, match=msg): + a - b + + # but we're OK for timestamp and datetime.datetime + assert (a - b.to_pydatetime()) == (a.to_pydatetime() - b) + + def test_delta_preserve_nanos(self): + val = Timestamp(1337299200000000123) + result = val + timedelta(1) + assert result.nanosecond == val.nanosecond + + def test_rsub_dtscalars(self, tz_naive_fixture): + # In particular, check that datetime64 - Timestamp works GH#28286 + td = Timedelta(1235345642000) + ts = Timestamp("2021-01-01", tz=tz_naive_fixture) + other = ts + td + + assert other - ts == td + assert other.to_pydatetime() - ts == td + if tz_naive_fixture is None: + assert other.to_datetime64() - ts == td + else: + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + other.to_datetime64() - ts + + def test_timestamp_sub_datetime(self): + dt = datetime(2013, 10, 12) + ts = 
Timestamp(datetime(2013, 10, 13)) + assert (ts - dt).days == 1 + assert (dt - ts).days == -1 + + def test_subtract_tzaware_datetime(self): + t1 = Timestamp("2020-10-22T22:00:00+00:00") + t2 = datetime(2020, 10, 22, 22, tzinfo=timezone.utc) + + result = t1 - t2 + + assert isinstance(result, Timedelta) + assert result == Timedelta("0 days") + + def test_subtract_timestamp_from_different_timezone(self): + t1 = Timestamp("20130101").tz_localize("US/Eastern") + t2 = Timestamp("20130101").tz_localize("CET") + + result = t1 - t2 + + assert isinstance(result, Timedelta) + assert result == Timedelta("0 days 06:00:00") + + def test_subtracting_involving_datetime_with_different_tz(self): + t1 = datetime(2013, 1, 1, tzinfo=timezone(timedelta(hours=-5))) + t2 = Timestamp("20130101").tz_localize("CET") + + result = t1 - t2 + + assert isinstance(result, Timedelta) + assert result == Timedelta("0 days 06:00:00") + + result = t2 - t1 + assert isinstance(result, Timedelta) + assert result == Timedelta("-1 days +18:00:00") + + def test_subtracting_different_timezones(self, tz_aware_fixture): + t_raw = Timestamp("20130101") + t_UTC = t_raw.tz_localize("UTC") + t_diff = t_UTC.tz_convert(tz_aware_fixture) + Timedelta("0 days 05:00:00") + + result = t_diff - t_UTC + + assert isinstance(result, Timedelta) + assert result == Timedelta("0 days 05:00:00") + + def test_addition_subtraction_types(self): + # Assert on the types resulting from Timestamp +/- various date/time + # objects + dt = datetime(2014, 3, 4) + td = timedelta(seconds=1) + ts = Timestamp(dt) + + msg = "Addition/subtraction of integers" + with pytest.raises(TypeError, match=msg): + # GH#22535 add/sub with integers is deprecated + ts + 1 + with pytest.raises(TypeError, match=msg): + ts - 1 + + # Timestamp + datetime not supported, though subtraction is supported + # and yields timedelta more tests in tseries/base/tests/test_base.py + assert type(ts - dt) == Timedelta + assert type(ts + td) == Timestamp + assert type(ts - td) == Timestamp + + # Timestamp +/- datetime64 not supported, so not tested (could possibly + # assert error raised?) 
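+ # timedelta64, by contrast, is supported for both addition and
+ # subtraction and yields a Timestamp: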
+ td64 = np.timedelta64(1, "D") + assert type(ts + td64) == Timestamp + assert type(ts - td64) == Timestamp + + @pytest.mark.parametrize( + "td", [Timedelta(hours=3), np.timedelta64(3, "h"), timedelta(hours=3)] + ) + def test_radd_tdscalar(self, td, fixed_now_ts): + # GH#24775 timedelta64+Timestamp should not raise + ts = fixed_now_ts + assert td + ts == ts + td + + @pytest.mark.parametrize( + "other,expected_difference", + [ + (np.timedelta64(-123, "ns"), -123), + (np.timedelta64(1234567898, "ns"), 1234567898), + (np.timedelta64(-123, "us"), -123000), + (np.timedelta64(-123, "ms"), -123000000), + ], + ) + def test_timestamp_add_timedelta64_unit(self, other, expected_difference): + now = datetime.now(timezone.utc) + ts = Timestamp(now).as_unit("ns") + result = ts + other + valdiff = result._value - ts._value + assert valdiff == expected_difference + + ts2 = Timestamp(now) + assert ts2 + other == result + + @pytest.mark.parametrize( + "ts", + [ + Timestamp("1776-07-04"), + Timestamp("1776-07-04", tz="UTC"), + ], + ) + @pytest.mark.parametrize( + "other", + [ + 1, + np.int64(1), + np.array([1, 2], dtype=np.int32), + np.array([3, 4], dtype=np.uint64), + ], + ) + def test_add_int_with_freq(self, ts, other): + msg = "Addition/subtraction of integers and integer-arrays" + with pytest.raises(TypeError, match=msg): + ts + other + with pytest.raises(TypeError, match=msg): + other + ts + + with pytest.raises(TypeError, match=msg): + ts - other + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + other - ts + + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) + def test_addsub_m8ndarray(self, shape): + # GH#33296 + ts = Timestamp("2020-04-04 15:45").as_unit("ns") + other = np.arange(6).astype("m8[h]").reshape(shape) + + result = ts + other + + ex_stamps = [ts + Timedelta(hours=n) for n in range(6)] + expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + result = other + ts + tm.assert_numpy_array_equal(result, expected) + + result = ts - other + ex_stamps = [ts - Timedelta(hours=n) for n in range(6)] + expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + other - ts + + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) + def test_addsub_m8ndarray_tzaware(self, shape): + # GH#33296 + ts = Timestamp("2020-04-04 15:45", tz="US/Pacific") + + other = np.arange(6).astype("m8[h]").reshape(shape) + + result = ts + other + + ex_stamps = [ts + Timedelta(hours=n) for n in range(6)] + expected = np.array(ex_stamps).reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + result = other + ts + tm.assert_numpy_array_equal(result, expected) + + result = ts - other + ex_stamps = [ts - Timedelta(hours=n) for n in range(6)] + expected = np.array(ex_stamps).reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + other - ts + + def test_subtract_different_utc_objects(self, utc_fixture, utc_fixture2): + # GH 32619 + dt = datetime(2021, 1, 1) + ts1 = Timestamp(dt, tz=utc_fixture) + ts2 = Timestamp(dt, tz=utc_fixture2) + result = ts1 - ts2 + expected = Timedelta(0) + assert result == expected + + @pytest.mark.parametrize( + "tz", + [ + pytz.timezone("US/Eastern"), + 
gettz("US/Eastern"), + "US/Eastern", + "dateutil/US/Eastern", + ], + ) + def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz): + # GH#1389 + + # 4 hours before DST transition + stamp = Timestamp("3/10/2012 22:00", tz=tz) + + result = stamp + timedelta(hours=6) + + # spring forward, + "7" hours + expected = Timestamp("3/11/2012 05:00", tz=tz) + + assert result == expected + + +class SubDatetime(datetime): + pass + + +@pytest.mark.parametrize( + "lh,rh", + [ + (SubDatetime(2000, 1, 1), Timedelta(hours=1)), + (Timedelta(hours=1), SubDatetime(2000, 1, 1)), + ], +) +def test_dt_subclass_add_timedelta(lh, rh): + # GH#25851 + # ensure that subclassed datetime works for + # Timedelta operations + result = lh + rh + expected = SubDatetime(2000, 1, 1, 1) + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_comparisons.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_comparisons.py new file mode 100644 index 0000000000000000000000000000000000000000..e7e5541cf499f50e018802807f8c36cd364bed5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_comparisons.py @@ -0,0 +1,313 @@ +from datetime import ( + datetime, + timedelta, +) +import operator + +import numpy as np +import pytest + +from pandas import Timestamp +import pandas._testing as tm + + +class TestTimestampComparison: + def test_compare_non_nano_dt64(self): + # don't raise when converting dt64 to Timestamp in __richcmp__ + dt = np.datetime64("1066-10-14") + ts = Timestamp(dt) + + assert dt == ts + + def test_comparison_dt64_ndarray(self): + ts = Timestamp("2021-01-01") + ts2 = Timestamp("2019-04-05") + arr = np.array([[ts.asm8, ts2.asm8]], dtype="M8[ns]") + + result = ts == arr + expected = np.array([[True, False]], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = arr == ts + tm.assert_numpy_array_equal(result, expected) + + result = ts != arr + tm.assert_numpy_array_equal(result, ~expected) + + result = arr != ts + tm.assert_numpy_array_equal(result, ~expected) + + result = ts2 < arr + tm.assert_numpy_array_equal(result, expected) + + result = arr < ts2 + tm.assert_numpy_array_equal(result, np.array([[False, False]], dtype=bool)) + + result = ts2 <= arr + tm.assert_numpy_array_equal(result, np.array([[True, True]], dtype=bool)) + + result = arr <= ts2 + tm.assert_numpy_array_equal(result, ~expected) + + result = ts >= arr + tm.assert_numpy_array_equal(result, np.array([[True, True]], dtype=bool)) + + result = arr >= ts + tm.assert_numpy_array_equal(result, np.array([[True, False]], dtype=bool)) + + @pytest.mark.parametrize("reverse", [True, False]) + def test_comparison_dt64_ndarray_tzaware(self, reverse, comparison_op): + ts = Timestamp("2021-01-01 00:00:00.00000", tz="UTC") + arr = np.array([ts.asm8, ts.asm8], dtype="M8[ns]") + + left, right = ts, arr + if reverse: + left, right = arr, ts + + if comparison_op is operator.eq: + expected = np.array([False, False], dtype=bool) + result = comparison_op(left, right) + tm.assert_numpy_array_equal(result, expected) + elif comparison_op is operator.ne: + expected = np.array([True, True], dtype=bool) + result = comparison_op(left, right) + tm.assert_numpy_array_equal(result, expected) + else: + msg = "Cannot compare tz-naive and tz-aware timestamps" + with pytest.raises(TypeError, match=msg): + comparison_op(left, right) + + def test_comparison_object_array(self): + # GH#15183 + ts = Timestamp("2011-01-03 00:00:00-0500", 
tz="US/Eastern") + other = Timestamp("2011-01-01 00:00:00-0500", tz="US/Eastern") + naive = Timestamp("2011-01-01 00:00:00") + + arr = np.array([other, ts], dtype=object) + res = arr == ts + expected = np.array([False, True], dtype=bool) + assert (res == expected).all() + + # 2D case + arr = np.array([[other, ts], [ts, other]], dtype=object) + res = arr != ts + expected = np.array([[True, False], [False, True]], dtype=bool) + assert res.shape == expected.shape + assert (res == expected).all() + + # tzaware mismatch + arr = np.array([naive], dtype=object) + msg = "Cannot compare tz-naive and tz-aware timestamps" + with pytest.raises(TypeError, match=msg): + arr < ts + + def test_comparison(self): + # 5-18-2012 00:00:00.000 + stamp = 1337299200000000000 + + val = Timestamp(stamp) + + assert val == val + assert not val != val + assert not val < val + assert val <= val + assert not val > val + assert val >= val + + other = datetime(2012, 5, 18) + assert val == other + assert not val != other + assert not val < other + assert val <= other + assert not val > other + assert val >= other + + other = Timestamp(stamp + 100) + + assert val != other + assert val != other + assert val < other + assert val <= other + assert other > val + assert other >= val + + def test_compare_invalid(self): + # GH#8058 + val = Timestamp("20130101 12:01:02") + assert not val == "foo" + assert not val == 10.0 + assert not val == 1 + assert not val == [] + assert not val == {"foo": 1} + assert not val == np.float64(1) + assert not val == np.int64(1) + + assert val != "foo" + assert val != 10.0 + assert val != 1 + assert val != [] + assert val != {"foo": 1} + assert val != np.float64(1) + assert val != np.int64(1) + + @pytest.mark.parametrize("tz", [None, "US/Pacific"]) + def test_compare_date(self, tz): + # GH#36131 comparing Timestamp with date object is deprecated + ts = Timestamp("2021-01-01 00:00:00.00000", tz=tz) + dt = ts.to_pydatetime().date() + # in 2.0 we disallow comparing pydate objects with Timestamps, + # following the stdlib datetime behavior. 
+ + msg = "Cannot compare Timestamp with datetime.date" + for left, right in [(ts, dt), (dt, ts)]: + assert not left == right + assert left != right + + with pytest.raises(TypeError, match=msg): + left < right + with pytest.raises(TypeError, match=msg): + left <= right + with pytest.raises(TypeError, match=msg): + left > right + with pytest.raises(TypeError, match=msg): + left >= right + + def test_cant_compare_tz_naive_w_aware(self, utc_fixture): + # see GH#1404 + a = Timestamp("3/12/2012") + b = Timestamp("3/12/2012", tz=utc_fixture) + + msg = "Cannot compare tz-naive and tz-aware timestamps" + assert not a == b + assert a != b + with pytest.raises(TypeError, match=msg): + a < b + with pytest.raises(TypeError, match=msg): + a <= b + with pytest.raises(TypeError, match=msg): + a > b + with pytest.raises(TypeError, match=msg): + a >= b + + assert not b == a + assert b != a + with pytest.raises(TypeError, match=msg): + b < a + with pytest.raises(TypeError, match=msg): + b <= a + with pytest.raises(TypeError, match=msg): + b > a + with pytest.raises(TypeError, match=msg): + b >= a + + assert not a == b.to_pydatetime() + assert not a.to_pydatetime() == b + + def test_timestamp_compare_scalars(self): + # case where ndim == 0 + lhs = np.datetime64(datetime(2013, 12, 6)) + rhs = Timestamp("now") + nat = Timestamp("nat") + + ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"} + + for left, right in ops.items(): + left_f = getattr(operator, left) + right_f = getattr(operator, right) + expected = left_f(lhs, rhs) + + result = right_f(rhs, lhs) + assert result == expected + + expected = left_f(rhs, nat) + result = right_f(nat, rhs) + assert result == expected + + def test_timestamp_compare_with_early_datetime(self): + # e.g. datetime.min + stamp = Timestamp("2012-01-01") + + assert not stamp == datetime.min + assert not stamp == datetime(1600, 1, 1) + assert not stamp == datetime(2700, 1, 1) + assert stamp != datetime.min + assert stamp != datetime(1600, 1, 1) + assert stamp != datetime(2700, 1, 1) + assert stamp > datetime(1600, 1, 1) + assert stamp >= datetime(1600, 1, 1) + assert stamp < datetime(2700, 1, 1) + assert stamp <= datetime(2700, 1, 1) + + other = Timestamp.min.to_pydatetime(warn=False) + assert other - timedelta(microseconds=1) < Timestamp.min + + def test_timestamp_compare_oob_dt64(self): + us = np.timedelta64(1, "us") + other = np.datetime64(Timestamp.min).astype("M8[us]") + + # This may change if the implementation bound is dropped to match + # DatetimeArray/DatetimeIndex GH#24124 + assert Timestamp.min > other + # Note: numpy gets the reversed comparison wrong + + other = np.datetime64(Timestamp.max).astype("M8[us]") + assert Timestamp.max > other # not actually OOB + assert other < Timestamp.max + + assert Timestamp.max < other + us + # Note: numpy gets the reversed comparison wrong + + # GH-42794 + other = datetime(9999, 9, 9) + assert Timestamp.min < other + assert other > Timestamp.min + assert Timestamp.max < other + assert other > Timestamp.max + + other = datetime(1, 1, 1) + assert Timestamp.max > other + assert other < Timestamp.max + assert Timestamp.min > other + assert other < Timestamp.min + + def test_compare_zerodim_array(self, fixed_now_ts): + # GH#26916 + ts = fixed_now_ts + dt64 = np.datetime64("2016-01-01", "ns") + arr = np.array(dt64) + assert arr.ndim == 0 + + result = arr < ts + assert result is np.bool_(True) + result = arr > ts + assert result is np.bool_(False) + + +def test_rich_comparison_with_unsupported_type(): + # Comparisons 
with unsupported objects should return NotImplemented + # (it previously raised TypeError, see #24011) + + class Inf: + def __lt__(self, o): + return False + + def __le__(self, o): + return isinstance(o, Inf) + + def __gt__(self, o): + return not isinstance(o, Inf) + + def __ge__(self, o): + return True + + def __eq__(self, other) -> bool: + return isinstance(other, Inf) + + inf = Inf() + timestamp = Timestamp("2018-11-30") + + for left, right in [(inf, timestamp), (timestamp, inf)]: + assert left > right or left < right + assert left >= right or left <= right + assert not left == right # pylint: disable=unneeded-not + assert left != right diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_constructors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..3975f3c46aaa128c4ad8dcedfb78800b203180ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_constructors.py @@ -0,0 +1,1068 @@ +import calendar +from datetime import ( + date, + datetime, + timedelta, + timezone, +) +import zoneinfo + +import dateutil.tz +from dateutil.tz import ( + gettz, + tzoffset, + tzutc, +) +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.compat import PY310 +from pandas.errors import OutOfBoundsDatetime + +from pandas import ( + NA, + NaT, + Period, + Timedelta, + Timestamp, +) + + +class TestTimestampConstructorUnitKeyword: + @pytest.mark.parametrize("typ", [int, float]) + def test_constructor_int_float_with_YM_unit(self, typ): + # GH#47266 avoid the conversions in cast_from_unit + val = typ(150) + + ts = Timestamp(val, unit="Y") + expected = Timestamp("2120-01-01") + assert ts == expected + + ts = Timestamp(val, unit="M") + expected = Timestamp("1982-07-01") + assert ts == expected + + @pytest.mark.parametrize("typ", [int, float]) + def test_construct_from_int_float_with_unit_out_of_bound_raises(self, typ): + # GH#50870 make sure we get a OutOfBoundsDatetime instead of OverflowError + val = typ(150000000000000) + + msg = f"cannot convert input {val} with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(val, unit="D") + + def test_constructor_float_not_round_with_YM_unit_raises(self): + # GH#47267 avoid the conversions in cast_from-unit + + msg = "Conversion of non-round float with unit=[MY] is ambiguous" + with pytest.raises(ValueError, match=msg): + Timestamp(150.5, unit="Y") + + with pytest.raises(ValueError, match=msg): + Timestamp(150.5, unit="M") + + @pytest.mark.parametrize( + "value, check_kwargs", + [ + [946688461000000000, {}], + [946688461000000000 / 1000, {"unit": "us"}], + [946688461000000000 / 1_000_000, {"unit": "ms"}], + [946688461000000000 / 1_000_000_000, {"unit": "s"}], + [10957, {"unit": "D", "h": 0}], + [ + (946688461000000000 + 500000) / 1000000000, + {"unit": "s", "us": 499, "ns": 964}, + ], + [ + (946688461000000000 + 500000000) / 1000000000, + {"unit": "s", "us": 500000}, + ], + [(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}], + [(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}], + [(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}], + [946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}], + [946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}], + [946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}], + 
[946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}], + [946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}], + [10957 + 0.5, {"unit": "D", "h": 12}], + ], + ) + def test_construct_with_unit(self, value, check_kwargs): + def check(value, unit=None, h=1, s=1, us=0, ns=0): + stamp = Timestamp(value, unit=unit) + assert stamp.year == 2000 + assert stamp.month == 1 + assert stamp.day == 1 + assert stamp.hour == h + if unit != "D": + assert stamp.minute == 1 + assert stamp.second == s + assert stamp.microsecond == us + else: + assert stamp.minute == 0 + assert stamp.second == 0 + assert stamp.microsecond == 0 + assert stamp.nanosecond == ns + + check(value, **check_kwargs) + + +class TestTimestampConstructorFoldKeyword: + def test_timestamp_constructor_invalid_fold_raise(self): + # Test for GH#25057 + # Valid fold values are only [None, 0, 1] + msg = "Valid values for the fold argument are None, 0, or 1." + with pytest.raises(ValueError, match=msg): + Timestamp(123, fold=2) + + def test_timestamp_constructor_pytz_fold_raise(self): + # Test for GH#25057 + # pytz doesn't support fold. Check that we raise + # if fold is passed with pytz + msg = "pytz timezones do not support fold. Please use dateutil timezones." + tz = pytz.timezone("Europe/London") + with pytest.raises(ValueError, match=msg): + Timestamp(datetime(2019, 10, 27, 0, 30, 0, 0), tz=tz, fold=0) + + @pytest.mark.parametrize("fold", [0, 1]) + @pytest.mark.parametrize( + "ts_input", + [ + 1572136200000000000, + 1572136200000000000.0, + np.datetime64(1572136200000000000, "ns"), + "2019-10-27 01:30:00+01:00", + datetime(2019, 10, 27, 0, 30, 0, 0, tzinfo=timezone.utc), + ], + ) + def test_timestamp_constructor_fold_conflict(self, ts_input, fold): + # Test for GH#25057 + # Check that we raise on fold conflict + msg = ( + "Cannot pass fold with possibly unambiguous input: int, float, " + "numpy.datetime64, str, or timezone-aware datetime-like. " + "Pass naive datetime-like or build Timestamp from components." 
+ ) + with pytest.raises(ValueError, match=msg): + Timestamp(ts_input=ts_input, fold=fold) + + @pytest.mark.parametrize("tz", ["dateutil/Europe/London", None]) + @pytest.mark.parametrize("fold", [0, 1]) + def test_timestamp_constructor_retain_fold(self, tz, fold): + # Test for GH#25057 + # Check that we retain fold + ts = Timestamp(year=2019, month=10, day=27, hour=1, minute=30, tz=tz, fold=fold) + result = ts.fold + expected = fold + assert result == expected + + try: + _tzs = [ + "dateutil/Europe/London", + zoneinfo.ZoneInfo("Europe/London"), + ] + except zoneinfo.ZoneInfoNotFoundError: + _tzs = ["dateutil/Europe/London"] + + @pytest.mark.parametrize("tz", _tzs) + @pytest.mark.parametrize( + "ts_input,fold_out", + [ + (1572136200000000000, 0), + (1572139800000000000, 1), + ("2019-10-27 01:30:00+01:00", 0), + ("2019-10-27 01:30:00+00:00", 1), + (datetime(2019, 10, 27, 1, 30, 0, 0, fold=0), 0), + (datetime(2019, 10, 27, 1, 30, 0, 0, fold=1), 1), + ], + ) + def test_timestamp_constructor_infer_fold_from_value(self, tz, ts_input, fold_out): + # Test for GH#25057 + # Check that we infer fold correctly based on timestamps since utc + # or strings + ts = Timestamp(ts_input, tz=tz) + result = ts.fold + expected = fold_out + assert result == expected + + @pytest.mark.parametrize("tz", ["dateutil/Europe/London"]) + @pytest.mark.parametrize( + "ts_input,fold,value_out", + [ + (datetime(2019, 10, 27, 1, 30, 0, 0), 0, 1572136200000000), + (datetime(2019, 10, 27, 1, 30, 0, 0), 1, 1572139800000000), + ], + ) + def test_timestamp_constructor_adjust_value_for_fold( + self, tz, ts_input, fold, value_out + ): + # Test for GH#25057 + # Check that we adjust value for fold correctly + # based on timestamps since utc + ts = Timestamp(ts_input, tz=tz, fold=fold) + result = ts._value + expected = value_out + assert result == expected + + +class TestTimestampConstructorPositionalAndKeywordSupport: + def test_constructor_positional(self): + # see GH#10758 + msg = ( + "'NoneType' object cannot be interpreted as an integer" + if PY310 + else "an integer is required" + ) + with pytest.raises(TypeError, match=msg): + Timestamp(2000, 1) + + msg = "month must be in 1..12" + with pytest.raises(ValueError, match=msg): + Timestamp(2000, 0, 1) + with pytest.raises(ValueError, match=msg): + Timestamp(2000, 13, 1) + + msg = "day is out of range for month" + with pytest.raises(ValueError, match=msg): + Timestamp(2000, 1, 0) + with pytest.raises(ValueError, match=msg): + Timestamp(2000, 1, 32) + + # see gh-11630 + assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112")) + assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr( + Timestamp("2015-11-12 01:02:03.999999") + ) + + def test_constructor_keyword(self): + # GH#10758 + msg = "function missing required argument 'day'|Required argument 'day'" + with pytest.raises(TypeError, match=msg): + Timestamp(year=2000, month=1) + + msg = "month must be in 1..12" + with pytest.raises(ValueError, match=msg): + Timestamp(year=2000, month=0, day=1) + with pytest.raises(ValueError, match=msg): + Timestamp(year=2000, month=13, day=1) + + msg = "day is out of range for month" + with pytest.raises(ValueError, match=msg): + Timestamp(year=2000, month=1, day=0) + with pytest.raises(ValueError, match=msg): + Timestamp(year=2000, month=1, day=32) + + assert repr(Timestamp(year=2015, month=11, day=12)) == repr( + Timestamp("20151112") + ) + + assert repr( + Timestamp( + year=2015, + month=11, + day=12, + hour=1, + minute=2, + second=3, + microsecond=999999, + ) + ) == 
repr(Timestamp("2015-11-12 01:02:03.999999")) + + @pytest.mark.parametrize( + "arg", + [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "microsecond", + "nanosecond", + ], + ) + def test_invalid_date_kwarg_with_string_input(self, arg): + kwarg = {arg: 1} + msg = "Cannot pass a date attribute keyword argument" + with pytest.raises(ValueError, match=msg): + Timestamp("2010-10-10 12:59:59.999999999", **kwarg) + + @pytest.mark.parametrize("kwargs", [{}, {"year": 2020}, {"year": 2020, "month": 1}]) + def test_constructor_missing_keyword(self, kwargs): + # GH#31200 + + # The exact error message of datetime() depends on its version + msg1 = r"function missing required argument '(year|month|day)' \(pos [123]\)" + msg2 = r"Required argument '(year|month|day)' \(pos [123]\) not found" + msg = "|".join([msg1, msg2]) + + with pytest.raises(TypeError, match=msg): + Timestamp(**kwargs) + + def test_constructor_positional_with_tzinfo(self): + # GH#31929 + ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc) + expected = Timestamp("2020-12-31", tzinfo=timezone.utc) + assert ts == expected + + @pytest.mark.parametrize("kwd", ["nanosecond", "microsecond", "second", "minute"]) + def test_constructor_positional_keyword_mixed_with_tzinfo(self, kwd, request): + # TODO: if we passed microsecond with a keyword we would mess up + # xref GH#45307 + if kwd != "nanosecond": + # nanosecond is keyword-only as of 2.0, others are not + mark = pytest.mark.xfail(reason="GH#45307") + request.applymarker(mark) + + kwargs = {kwd: 4} + ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc, **kwargs) + + td_kwargs = {kwd + "s": 4} + td = Timedelta(**td_kwargs) + expected = Timestamp("2020-12-31", tz=timezone.utc) + td + assert ts == expected + + +class TestTimestampClassMethodConstructors: + # Timestamp constructors other than __new__ + + def test_constructor_strptime(self): + # GH#25016 + # Test support for Timestamp.strptime + fmt = "%Y%m%d-%H%M%S-%f%z" + ts = "20190129-235348-000001+0000" + msg = r"Timestamp.strptime\(\) is not implemented" + with pytest.raises(NotImplementedError, match=msg): + Timestamp.strptime(ts, fmt) + + def test_constructor_fromisocalendar(self): + # GH#30395 + expected_timestamp = Timestamp("2000-01-03 00:00:00") + expected_stdlib = datetime.fromisocalendar(2000, 1, 1) + result = Timestamp.fromisocalendar(2000, 1, 1) + assert result == expected_timestamp + assert result == expected_stdlib + assert isinstance(result, Timestamp) + + def test_constructor_fromordinal(self): + base = datetime(2000, 1, 1) + + ts = Timestamp.fromordinal(base.toordinal()) + assert base == ts + assert base.toordinal() == ts.toordinal() + + ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern") + assert Timestamp("2000-01-01", tz="US/Eastern") == ts + assert base.toordinal() == ts.toordinal() + + # GH#3042 + dt = datetime(2011, 4, 16, 0, 0) + ts = Timestamp.fromordinal(dt.toordinal()) + assert ts.to_pydatetime() == dt + + # with a tzinfo + stamp = Timestamp("2011-4-16", tz="US/Eastern") + dt_tz = stamp.to_pydatetime() + ts = Timestamp.fromordinal(dt_tz.toordinal(), tz="US/Eastern") + assert ts.to_pydatetime() == dt_tz + + def test_now(self): + # GH#9000 + ts_from_string = Timestamp("now") + ts_from_method = Timestamp.now() + ts_datetime = datetime.now() + + ts_from_string_tz = Timestamp("now", tz="US/Eastern") + ts_from_method_tz = Timestamp.now(tz="US/Eastern") + + # Check that the delta between the times is less than 1s (arbitrarily + # small) + delta = Timedelta(seconds=1) + assert abs(ts_from_method - 
ts_from_string) < delta + assert abs(ts_datetime - ts_from_method) < delta + assert abs(ts_from_method_tz - ts_from_string_tz) < delta + assert ( + abs( + ts_from_string_tz.tz_localize(None) + - ts_from_method_tz.tz_localize(None) + ) + < delta + ) + + def test_today(self): + ts_from_string = Timestamp("today") + ts_from_method = Timestamp.today() + ts_datetime = datetime.today() + + ts_from_string_tz = Timestamp("today", tz="US/Eastern") + ts_from_method_tz = Timestamp.today(tz="US/Eastern") + + # Check that the delta between the times is less than 1s (arbitrarily + # small) + delta = Timedelta(seconds=1) + assert abs(ts_from_method - ts_from_string) < delta + assert abs(ts_datetime - ts_from_method) < delta + assert abs(ts_from_method_tz - ts_from_string_tz) < delta + assert ( + abs( + ts_from_string_tz.tz_localize(None) + - ts_from_method_tz.tz_localize(None) + ) + < delta + ) + + +class TestTimestampResolutionInference: + def test_construct_from_time_unit(self): + # GH#54097 only passing a time component, no date + ts = Timestamp("01:01:01.111") + assert ts.unit == "ms" + + def test_constructor_str_infer_reso(self): + # non-iso8601 path + + # _parse_delimited_date path + ts = Timestamp("01/30/2023") + assert ts.unit == "s" + + # _parse_dateabbr_string path + ts = Timestamp("2015Q1") + assert ts.unit == "s" + + # dateutil_parse path + ts = Timestamp("2016-01-01 1:30:01 PM") + assert ts.unit == "s" + + ts = Timestamp("2016 June 3 15:25:01.345") + assert ts.unit == "ms" + + ts = Timestamp("300-01-01") + assert ts.unit == "s" + + ts = Timestamp("300 June 1:30:01.300") + assert ts.unit == "ms" + + # dateutil path -> don't drop trailing zeros + ts = Timestamp("01-01-2013T00:00:00.000000000+0000") + assert ts.unit == "ns" + + ts = Timestamp("2016/01/02 03:04:05.001000 UTC") + assert ts.unit == "us" + + # higher-than-nanosecond -> we drop the trailing bits + ts = Timestamp("01-01-2013T00:00:00.000000002100+0000") + assert ts == Timestamp("01-01-2013T00:00:00.000000002+0000") + assert ts.unit == "ns" + + # GH#56208 minute reso through the ISO8601 path with tz offset + ts = Timestamp("2020-01-01 00:00+00:00") + assert ts.unit == "s" + + ts = Timestamp("2020-01-01 00+00:00") + assert ts.unit == "s" + + @pytest.mark.parametrize("method", ["now", "today"]) + def test_now_today_unit(self, method): + # GH#55879 + ts_from_method = getattr(Timestamp, method)() + ts_from_string = Timestamp(method) + assert ts_from_method.unit == ts_from_string.unit == "us" + + +class TestTimestampConstructors: + def test_weekday_but_no_day_raises(self): + # GH#52659 + msg = "Parsing datetimes with weekday but no day information is not supported" + with pytest.raises(ValueError, match=msg): + Timestamp("2023 Sept Thu") + + def test_construct_from_string_invalid_raises(self): + # dateutil (weirdly) parses "200622-12-31" as + # datetime(2022, 6, 20, 12, 0, tzinfo=tzoffset(None, -111600) + # which besides being mis-parsed, is a tzoffset that will cause + # str(ts) to raise ValueError. Ensure we raise in the constructor + # instead. 
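+ # (an offset of -111600 seconds is -31 hours, outside the +/- 24 hour
+ # range a tzinfo may report, which is why str() would fail)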
+ # see test_to_datetime_malformed_raise for analogous to_datetime test + with pytest.raises(ValueError, match="gives an invalid tzoffset"): + Timestamp("200622-12-31") + + def test_constructor_from_iso8601_str_with_offset_reso(self): + # GH#49737 + ts = Timestamp("2016-01-01 04:05:06-01:00") + assert ts.unit == "s" + + ts = Timestamp("2016-01-01 04:05:06.000-01:00") + assert ts.unit == "ms" + + ts = Timestamp("2016-01-01 04:05:06.000000-01:00") + assert ts.unit == "us" + + ts = Timestamp("2016-01-01 04:05:06.000000001-01:00") + assert ts.unit == "ns" + + def test_constructor_from_date_second_reso(self): + # GH#49034 constructing from a pydate object gets lowest supported + # reso, i.e. seconds + obj = date(2012, 9, 1) + ts = Timestamp(obj) + assert ts.unit == "s" + + def test_constructor_datetime64_with_tz(self): + # GH#42288, GH#24559 + dt = np.datetime64("1970-01-01 05:00:00") + tzstr = "UTC+05:00" + + # pre-2.0 this interpreted dt as a UTC time. in 2.0 this is treated + # as a wall-time, consistent with DatetimeIndex behavior + ts = Timestamp(dt, tz=tzstr) + + alt = Timestamp(dt).tz_localize(tzstr) + assert ts == alt + assert ts.hour == 5 + + def test_constructor(self): + base_str = "2014-07-01 09:00" + base_dt = datetime(2014, 7, 1, 9) + base_expected = 1_404_205_200_000_000_000 + + # confirm base representation is correct + assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected + + tests = [ + (base_str, base_dt, base_expected), + ( + "2014-07-01 10:00", + datetime(2014, 7, 1, 10), + base_expected + 3600 * 1_000_000_000, + ), + ( + "2014-07-01 09:00:00.000008000", + datetime(2014, 7, 1, 9, 0, 0, 8), + base_expected + 8000, + ), + ( + "2014-07-01 09:00:00.000000005", + Timestamp("2014-07-01 09:00:00.000000005"), + base_expected + 5, + ), + ] + + timezones = [ + (None, 0), + ("UTC", 0), + (pytz.utc, 0), + ("Asia/Tokyo", 9), + ("US/Eastern", -4), + ("dateutil/US/Pacific", -7), + (pytz.FixedOffset(-180), -3), + (dateutil.tz.tzoffset(None, 18000), 5), + ] + + for date_str, date_obj, expected in tests: + for result in [Timestamp(date_str), Timestamp(date_obj)]: + result = result.as_unit("ns") # test originally written before non-nano + # only with timestring + assert result.as_unit("ns")._value == expected + + # re-creation shouldn't affect to internal value + result = Timestamp(result) + assert result.as_unit("ns")._value == expected + + # with timezone + for tz, offset in timezones: + for result in [Timestamp(date_str, tz=tz), Timestamp(date_obj, tz=tz)]: + result = result.as_unit( + "ns" + ) # test originally written before non-nano + expected_tz = expected - offset * 3600 * 1_000_000_000 + assert result.as_unit("ns")._value == expected_tz + + # should preserve tz + result = Timestamp(result) + assert result.as_unit("ns")._value == expected_tz + + # should convert to UTC + if tz is not None: + result = Timestamp(result).tz_convert("UTC") + else: + result = Timestamp(result, tz="UTC") + expected_utc = expected - offset * 3600 * 1_000_000_000 + assert result.as_unit("ns")._value == expected_utc + + def test_constructor_with_stringoffset(self): + # GH 7833 + base_str = "2014-07-01 11:00:00+02:00" + base_dt = datetime(2014, 7, 1, 9) + base_expected = 1_404_205_200_000_000_000 + + # confirm base representation is correct + assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected + + tests = [ + (base_str, base_expected), + ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000), + ("2014-07-01 11:00:00.000008000+02:00", base_expected + 
8000), + ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5), + ] + + timezones = [ + (None, 0), + ("UTC", 0), + (pytz.utc, 0), + ("Asia/Tokyo", 9), + ("US/Eastern", -4), + ("dateutil/US/Pacific", -7), + (pytz.FixedOffset(-180), -3), + (dateutil.tz.tzoffset(None, 18000), 5), + ] + + for date_str, expected in tests: + for result in [Timestamp(date_str)]: + # only with timestring + assert result.as_unit("ns")._value == expected + + # re-creation shouldn't affect to internal value + result = Timestamp(result) + assert result.as_unit("ns")._value == expected + + # with timezone + for tz, offset in timezones: + result = Timestamp(date_str, tz=tz) + expected_tz = expected + assert result.as_unit("ns")._value == expected_tz + + # should preserve tz + result = Timestamp(result) + assert result.as_unit("ns")._value == expected_tz + + # should convert to UTC + result = Timestamp(result).tz_convert("UTC") + expected_utc = expected + assert result.as_unit("ns")._value == expected_utc + + # This should be 2013-11-01 05:00 in UTC + # converted to Chicago tz + result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago") + assert result._value == Timestamp("2013-11-01 05:00")._value + expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" + assert repr(result) == expected + assert result == eval(repr(result)) + + # This should be 2013-11-01 05:00 in UTC + # converted to Tokyo tz (+09:00) + result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo") + assert result._value == Timestamp("2013-11-01 05:00")._value + expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')" + assert repr(result) == expected + assert result == eval(repr(result)) + + # GH11708 + # This should be 2015-11-18 10:00 in UTC + # converted to Asia/Katmandu + result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu") + assert result._value == Timestamp("2015-11-18 10:00")._value + expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')" + assert repr(result) == expected + assert result == eval(repr(result)) + + # This should be 2015-11-18 10:00 in UTC + # converted to Asia/Kolkata + result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata") + assert result._value == Timestamp("2015-11-18 10:00")._value + expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')" + assert repr(result) == expected + assert result == eval(repr(result)) + + def test_constructor_invalid(self): + msg = "Cannot convert input" + with pytest.raises(TypeError, match=msg): + Timestamp(slice(2)) + msg = "Cannot convert Period" + with pytest.raises(ValueError, match=msg): + Timestamp(Period("1000-01-01")) + + def test_constructor_invalid_tz(self): + # GH#17690 + msg = ( + "Argument 'tzinfo' has incorrect type " + r"\(expected datetime.tzinfo, got str\)" + ) + with pytest.raises(TypeError, match=msg): + Timestamp("2017-10-22", tzinfo="US/Eastern") + + msg = "at most one of" + with pytest.raises(ValueError, match=msg): + Timestamp("2017-10-22", tzinfo=pytz.utc, tz="UTC") + + msg = "Cannot pass a date attribute keyword argument when passing a date string" + with pytest.raises(ValueError, match=msg): + # GH#5168 + # case where user tries to pass tz as an arg, not kwarg, gets + # interpreted as `year` + Timestamp("2012-01-01", "US/Pacific") + + def test_constructor_tz_or_tzinfo(self): + # GH#17943, GH#17690, GH#5168 + stamps = [ + Timestamp(year=2017, month=10, day=22, tz="UTC"), + Timestamp(year=2017, month=10, day=22, tzinfo=pytz.utc), + Timestamp(year=2017, month=10, day=22, 
tz=pytz.utc), + Timestamp(datetime(2017, 10, 22), tzinfo=pytz.utc), + Timestamp(datetime(2017, 10, 22), tz="UTC"), + Timestamp(datetime(2017, 10, 22), tz=pytz.utc), + ] + assert all(ts == stamps[0] for ts in stamps) + + @pytest.mark.parametrize( + "result", + [ + Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1), + Timestamp( + year=2000, + month=1, + day=2, + hour=3, + minute=4, + second=5, + microsecond=6, + nanosecond=1, + ), + Timestamp( + year=2000, + month=1, + day=2, + hour=3, + minute=4, + second=5, + microsecond=6, + nanosecond=1, + tz="UTC", + ), + Timestamp(2000, 1, 2, 3, 4, 5, 6, None, nanosecond=1), + Timestamp(2000, 1, 2, 3, 4, 5, 6, tz=pytz.UTC, nanosecond=1), + ], + ) + def test_constructor_nanosecond(self, result): + # GH 18898 + # As of 2.0 (GH 49416), nanosecond should not be accepted positionally + expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz) + expected = expected + Timedelta(nanoseconds=1) + assert result == expected + + @pytest.mark.parametrize("z", ["Z0", "Z00"]) + def test_constructor_invalid_Z0_isostring(self, z): + # GH 8910 + msg = f"Unknown datetime string format, unable to parse: 2014-11-02 01:00{z}" + with pytest.raises(ValueError, match=msg): + Timestamp(f"2014-11-02 01:00{z}") + + def test_out_of_bounds_integer_value(self): + # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError + msg = str(Timestamp.max._value * 2) + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(Timestamp.max._value * 2) + msg = str(Timestamp.min._value * 2) + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(Timestamp.min._value * 2) + + def test_out_of_bounds_value(self): + one_us = np.timedelta64(1).astype("timedelta64[us]") + + # By definition we can't go out of bounds in [ns], so we + # convert the datetime64s to [us] so we can go out of bounds + min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]") + one_us + max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]") + + # No error for the min/max datetimes + Timestamp(min_ts_us) + Timestamp(max_ts_us) + + # We used to raise on these before supporting non-nano + us_val = NpyDatetimeUnit.NPY_FR_us.value + assert Timestamp(min_ts_us - one_us)._creso == us_val + assert Timestamp(max_ts_us + one_us)._creso == us_val + + # https://github.com/numpy/numpy/issues/22346 for why + # we can't use the same construction as above with minute resolution + + # too_low, too_high are the _just_ outside the range of M8[s] + too_low = np.datetime64("-292277022657-01-27T08:29", "m") + too_high = np.datetime64("292277026596-12-04T15:31", "m") + + msg = "Out of bounds" + # One us less than the minimum is an error + with pytest.raises(ValueError, match=msg): + Timestamp(too_low) + + # One us more than the maximum is an error + with pytest.raises(ValueError, match=msg): + Timestamp(too_high) + + def test_out_of_bounds_string(self): + msg = "Cannot cast .* to unit='ns' without overflow" + with pytest.raises(ValueError, match=msg): + Timestamp("1676-01-01").as_unit("ns") + with pytest.raises(ValueError, match=msg): + Timestamp("2263-01-01").as_unit("ns") + + ts = Timestamp("2263-01-01") + assert ts.unit == "s" + + ts = Timestamp("1676-01-01") + assert ts.unit == "s" + + def test_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp("2262-04-11 23:47:16.854775808") + 
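The two tests above pin down the edge of the nanosecond range: a string carrying full nanosecond precision just past `Timestamp.max` (2262-04-11 23:47:16.854775807) must be stored in nanoseconds and therefore raises, while a coarser out-of-range date makes the constructor fall back to second resolution. A minimal sketch of that fallback, using only the public behavior these tests assert (pandas >= 2.0):

```python
from pandas import Timestamp

ts = Timestamp("2263-01-01")  # past the ns-resolution upper bound
print(ts.unit)                # "s" -- the constructor falls back to seconds

try:
    ts.as_unit("ns")          # forcing nanosecond resolution overflows
except ValueError as err:     # OutOfBoundsDatetime subclasses ValueError
    print(err)                # Cannot cast ... to unit='ns' without overflow
```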
+ @pytest.mark.skip_ubsan + def test_bounds_with_different_units(self): + out_of_bounds_dates = ("1677-09-21", "2262-04-12") + + time_units = ("D", "h", "m", "s", "ms", "us") + + for date_string in out_of_bounds_dates: + for unit in time_units: + dt64 = np.datetime64(date_string, unit) + ts = Timestamp(dt64) + if unit in ["s", "ms", "us"]: + # We can preserve the input unit + assert ts._value == dt64.view("i8") + else: + # we chose the closest unit that we _do_ support + assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value + + # With more extreme cases, we can't even fit inside second resolution + info = np.iinfo(np.int64) + msg = "Out of bounds second timestamp:" + for value in [info.min + 1, info.max]: + for unit in ["D", "h", "m"]: + dt64 = np.datetime64(value, unit) + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(dt64) + + in_bounds_dates = ("1677-09-23", "2262-04-11") + + for date_string in in_bounds_dates: + for unit in time_units: + dt64 = np.datetime64(date_string, unit) + Timestamp(dt64) + + @pytest.mark.parametrize("arg", ["001-01-01", "0001-01-01"]) + def test_out_of_bounds_string_consistency(self, arg): + # GH 15829 + msg = "Cannot cast 0001-01-01 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(arg).as_unit("ns") + + ts = Timestamp(arg) + assert ts.unit == "s" + assert ts.year == ts.month == ts.day == 1 + + def test_min_valid(self): + # Ensure that Timestamp.min is a valid Timestamp + Timestamp(Timestamp.min) + + def test_max_valid(self): + # Ensure that Timestamp.max is a valid Timestamp + Timestamp(Timestamp.max) + + @pytest.mark.parametrize("offset", ["+0300", "+0200"]) + def test_construct_timestamp_near_dst(self, offset): + # GH 20854 + expected = Timestamp(f"2016-10-30 03:00:00{offset}", tz="Europe/Helsinki") + result = Timestamp(expected).tz_convert("Europe/Helsinki") + assert result == expected + + @pytest.mark.parametrize( + "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"] + ) + def test_construct_with_different_string_format(self, arg): + # GH 12064 + result = Timestamp(arg) + expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540)) + assert result == expected + + @pytest.mark.parametrize("box", [datetime, Timestamp]) + def test_raise_tz_and_tzinfo_in_datetime_input(self, box): + # GH 23579 + kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": pytz.utc} + msg = "Cannot pass a datetime or Timestamp" + with pytest.raises(ValueError, match=msg): + Timestamp(box(**kwargs), tz="US/Pacific") + msg = "Cannot pass a datetime or Timestamp" + with pytest.raises(ValueError, match=msg): + Timestamp(box(**kwargs), tzinfo=pytz.timezone("US/Pacific")) + + def test_dont_convert_dateutil_utc_to_pytz_utc(self): + result = Timestamp(datetime(2018, 1, 1), tz=tzutc()) + expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc()) + assert result == expected + + def test_constructor_subclassed_datetime(self): + # GH 25851 + # ensure that subclassed datetime works for + # Timestamp creation + class SubDatetime(datetime): + pass + + data = SubDatetime(2000, 1, 1) + result = Timestamp(data) + expected = Timestamp(2000, 1, 1) + assert result == expected + + def test_timestamp_constructor_tz_utc(self): + utc_stamp = Timestamp("3/11/2012 05:00", tz="utc") + assert utc_stamp.tzinfo is timezone.utc + assert utc_stamp.hour == 5 + + utc_stamp = Timestamp("3/11/2012 05:00").tz_localize("utc") + assert utc_stamp.hour == 5 + + def test_timestamp_to_datetime_tzoffset(self): + tzinfo = 
tzoffset(None, 7200) + expected = Timestamp("3/11/2012 04:00", tz=tzinfo) + result = Timestamp(expected.to_pydatetime()) + assert expected == result + + def test_timestamp_constructor_near_dst_boundary(self): + # GH#11481 & GH#15777 + # Naive string timestamps were being localized incorrectly + # with tz_convert_from_utc_single instead of tz_localize_to_utc + + for tz in ["Europe/Brussels", "Europe/Prague"]: + result = Timestamp("2015-10-25 01:00", tz=tz) + expected = Timestamp("2015-10-25 01:00").tz_localize(tz) + assert result == expected + + msg = "Cannot infer dst time from 2015-10-25 02:00:00" + with pytest.raises(pytz.AmbiguousTimeError, match=msg): + Timestamp("2015-10-25 02:00", tz=tz) + + result = Timestamp("2017-03-26 01:00", tz="Europe/Paris") + expected = Timestamp("2017-03-26 01:00").tz_localize("Europe/Paris") + assert result == expected + + msg = "2017-03-26 02:00" + with pytest.raises(pytz.NonExistentTimeError, match=msg): + Timestamp("2017-03-26 02:00", tz="Europe/Paris") + + # GH#11708 + naive = Timestamp("2015-11-18 10:00:00") + result = naive.tz_localize("UTC").tz_convert("Asia/Kolkata") + expected = Timestamp("2015-11-18 15:30:00+0530", tz="Asia/Kolkata") + assert result == expected + + # GH#15823 + result = Timestamp("2017-03-26 00:00", tz="Europe/Paris") + expected = Timestamp("2017-03-26 00:00:00+0100", tz="Europe/Paris") + assert result == expected + + result = Timestamp("2017-03-26 01:00", tz="Europe/Paris") + expected = Timestamp("2017-03-26 01:00:00+0100", tz="Europe/Paris") + assert result == expected + + msg = "2017-03-26 02:00" + with pytest.raises(pytz.NonExistentTimeError, match=msg): + Timestamp("2017-03-26 02:00", tz="Europe/Paris") + + result = Timestamp("2017-03-26 02:00:00+0100", tz="Europe/Paris") + naive = Timestamp(result.as_unit("ns")._value) + expected = naive.tz_localize("UTC").tz_convert("Europe/Paris") + assert result == expected + + result = Timestamp("2017-03-26 03:00", tz="Europe/Paris") + expected = Timestamp("2017-03-26 03:00:00+0200", tz="Europe/Paris") + assert result == expected + + @pytest.mark.parametrize( + "tz", + [ + pytz.timezone("US/Eastern"), + gettz("US/Eastern"), + "US/Eastern", + "dateutil/US/Eastern", + ], + ) + def test_timestamp_constructed_by_date_and_tz(self, tz): + # GH#2993, Timestamp cannot be constructed by datetime.date + # and tz correctly + + result = Timestamp(date(2012, 3, 11), tz=tz) + + expected = Timestamp("3/11/2012", tz=tz) + assert result.hour == expected.hour + assert result == expected + + +def test_constructor_ambiguous_dst(): + # GH 24329 + # Make sure that calling Timestamp constructor + # on Timestamp created from ambiguous time + # doesn't change Timestamp.value + ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London") + expected = ts._value + result = Timestamp(ts)._value + assert result == expected + + +@pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999]) +def test_constructor_before_dst_switch(epoch): + # GH 31043 + # Make sure that calling Timestamp constructor + # on time just before DST switch doesn't lead to + # nonexistent time or value change + ts = Timestamp(epoch, tz="dateutil/America/Los_Angeles") + result = ts.tz.dst(ts) + expected = timedelta(seconds=0) + assert Timestamp(ts)._value == epoch + assert result == expected + + +def test_timestamp_constructor_identity(): + # Test for #30543 + expected = Timestamp("2017-01-01T12") + result = Timestamp(expected) + assert result is expected + + +@pytest.mark.parametrize("nano", [-1, 1000]) +def 
test_timestamp_nano_range(nano): + # GH 48255 + with pytest.raises(ValueError, match="nanosecond must be in 0..999"): + Timestamp(year=2022, month=1, day=1, nanosecond=nano) + + +def test_non_nano_value(): + # https://github.com/pandas-dev/pandas/issues/49076 + result = Timestamp("1800-01-01", unit="s").value + # `.value` shows nanoseconds, even though unit is 's' + assert result == -5364662400000000000 + + # out-of-nanoseconds-bounds `.value` raises informative message + msg = ( + r"Cannot convert Timestamp to nanoseconds without overflow. " + r"Use `.asm8.view\('i8'\)` to cast represent Timestamp in its " + r"own unit \(here, s\).$" + ) + ts = Timestamp("0300-01-01") + with pytest.raises(OverflowError, match=msg): + ts.value + # check that the suggested workaround actually works + result = ts.asm8.view("i8") + assert result == -52700112000 + + +@pytest.mark.parametrize("na_value", [None, np.nan, np.datetime64("NaT"), NaT, NA]) +def test_timestamp_constructor_na_value(na_value): + # GH45481 + result = Timestamp(na_value) + expected = NaT + assert result is expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_formats.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ebcccef1c86ee34cb8889d8840b77f5cfc2616 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_formats.py @@ -0,0 +1,201 @@ +from datetime import datetime +import pprint + +import dateutil.tz +import pytest +import pytz # a test below uses pytz but only inside a `eval` call + +from pandas import Timestamp + +ts_no_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, +) +ts_no_ns_year1 = Timestamp( + year=1, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, +) +ts_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, +) +ts_ns_tz = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, + tz="UTC", +) +ts_no_us = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=0, + nanosecond=123, +) + + +@pytest.mark.parametrize( + "ts, timespec, expected_iso", + [ + (ts_no_ns, "auto", "2019-05-18T15:17:08.132263"), + (ts_no_ns, "seconds", "2019-05-18T15:17:08"), + (ts_no_ns, "nanoseconds", "2019-05-18T15:17:08.132263000"), + (ts_no_ns_year1, "seconds", "0001-05-18T15:17:08"), + (ts_no_ns_year1, "nanoseconds", "0001-05-18T15:17:08.132263000"), + (ts_ns, "auto", "2019-05-18T15:17:08.132263123"), + (ts_ns, "hours", "2019-05-18T15"), + (ts_ns, "minutes", "2019-05-18T15:17"), + (ts_ns, "seconds", "2019-05-18T15:17:08"), + (ts_ns, "milliseconds", "2019-05-18T15:17:08.132"), + (ts_ns, "microseconds", "2019-05-18T15:17:08.132263"), + (ts_ns, "nanoseconds", "2019-05-18T15:17:08.132263123"), + (ts_ns_tz, "auto", "2019-05-18T15:17:08.132263123+00:00"), + (ts_ns_tz, "hours", "2019-05-18T15+00:00"), + (ts_ns_tz, "minutes", "2019-05-18T15:17+00:00"), + (ts_ns_tz, "seconds", "2019-05-18T15:17:08+00:00"), + (ts_ns_tz, "milliseconds", "2019-05-18T15:17:08.132+00:00"), + (ts_ns_tz, "microseconds", "2019-05-18T15:17:08.132263+00:00"), + (ts_ns_tz, "nanoseconds", "2019-05-18T15:17:08.132263123+00:00"), + (ts_no_us, "auto", "2019-05-18T15:17:08.000000123"), + ], +) +def test_isoformat(ts, timespec, 
expected_iso):
+    assert ts.isoformat(timespec=timespec) == expected_iso
+
+
+class TestTimestampRendering:
+    timezones = ["UTC", "Asia/Tokyo", "US/Eastern", "dateutil/America/Los_Angeles"]
+
+    @pytest.mark.parametrize("tz", timezones)
+    @pytest.mark.parametrize("freq", ["D", "M", "S", "N"])
+    @pytest.mark.parametrize(
+        "date", ["2014-03-07", "2014-01-01 09:00", "2014-01-01 00:00:00.000000001"]
+    )
+    def test_repr(self, date, freq, tz):
+        # avoid matching against the timezone name
+        freq_repr = f"'{freq}'"
+        if tz.startswith("dateutil"):
+            tz_repr = tz.replace("dateutil", "")
+        else:
+            tz_repr = tz
+
+        date_only = Timestamp(date)
+        assert date in repr(date_only)
+        assert tz_repr not in repr(date_only)
+        assert freq_repr not in repr(date_only)
+        assert date_only == eval(repr(date_only))
+
+        date_tz = Timestamp(date, tz=tz)
+        assert date in repr(date_tz)
+        assert tz_repr in repr(date_tz)
+        assert freq_repr not in repr(date_tz)
+        assert date_tz == eval(repr(date_tz))
+
+    def test_repr_utcoffset(self):
+        # This can cause the tz field to be populated, but it's redundant to
+        # include this information in the date-string.
+        date_with_utc_offset = Timestamp("2014-03-13 00:00:00-0400", tz=None)
+        assert "2014-03-13 00:00:00-0400" in repr(date_with_utc_offset)
+        assert "tzoffset" not in repr(date_with_utc_offset)
+        assert "UTC-04:00" in repr(date_with_utc_offset)
+        expr = repr(date_with_utc_offset)
+        assert date_with_utc_offset == eval(expr)
+
+    def test_timestamp_repr_pre1900(self):
+        # pre-1900
+        stamp = Timestamp("1850-01-01", tz="US/Eastern")
+        repr(stamp)
+
+        iso8601 = "1850-01-01 01:23:45.012345"
+        stamp = Timestamp(iso8601, tz="US/Eastern")
+        result = repr(stamp)
+        assert iso8601 in result
+
+    def test_pprint(self):
+        # GH#12622
+        nested_obj = {"foo": 1, "bar": [{"w": {"a": Timestamp("2011-01-01")}}] * 10}
+        result = pprint.pformat(nested_obj, width=50)
+        expected = r"""{'bar': [{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}},
+         {'w': {'a': Timestamp('2011-01-01 00:00:00')}}],
+ 'foo': 1}"""
+        assert result == expected
+
+    def test_to_timestamp_repr_is_code(self):
+        zs = [
+            Timestamp("99-04-17 00:00:00", tz="UTC"),
+            Timestamp("2001-04-17 00:00:00", tz="UTC"),
+            Timestamp("2001-04-17 00:00:00", tz="America/Los_Angeles"),
+            Timestamp("2001-04-17 00:00:00", tz=None),
+        ]
+        for z in zs:
+            assert eval(repr(z)) == z
+
+    def test_repr_matches_pydatetime_no_tz(self):
+        dt_date = datetime(2013, 1, 2)
+        assert str(dt_date) == str(Timestamp(dt_date))
+
+        dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
+        assert str(dt_datetime) == str(Timestamp(dt_datetime))
+
+        dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
+        assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
+
+        ts_nanos_only = Timestamp(200)
+        assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
+
+        ts_nanos_micros = Timestamp(1200)
+        assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
+
+    def test_repr_matches_pydatetime_tz_pytz(self):
+        dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
+        assert str(dt_date) == str(Timestamp(dt_date))
+
+        dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
+        assert str(dt_datetime) ==
str(Timestamp(dt_datetime)) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc) + assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) + + def test_repr_matches_pydatetime_tz_dateutil(self): + utc = dateutil.tz.tzutc() + + dt_date = datetime(2013, 1, 2, tzinfo=utc) + assert str(dt_date) == str(Timestamp(dt_date)) + + dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc) + assert str(dt_datetime) == str(Timestamp(dt_datetime)) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc) + assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timestamp.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timestamp.py new file mode 100644 index 0000000000000000000000000000000000000000..05e1c93e1a676a977bde93dffcbdb7f389db5f37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timestamp.py @@ -0,0 +1,928 @@ +""" test the scalar Timestamp """ + +import calendar +from datetime import ( + datetime, + timedelta, + timezone, +) +import locale +import time +import unicodedata + +from dateutil.tz import ( + tzlocal, + tzutc, +) +from hypothesis import ( + given, + strategies as st, +) +import numpy as np +import pytest +import pytz +from pytz import utc + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas._libs.tslibs.timezones import ( + dateutil_gettz as gettz, + get_timezone, + maybe_get_tz, + tz_compare, +) +from pandas.compat import IS64 + +from pandas import ( + NaT, + Timedelta, + Timestamp, +) +import pandas._testing as tm + +from pandas.tseries import offsets +from pandas.tseries.frequencies import to_offset + + +class TestTimestampProperties: + def test_properties_business(self): + freq = to_offset("B") + + ts = Timestamp("2017-10-01") + assert ts.dayofweek == 6 + assert ts.day_of_week == 6 + assert ts.is_month_start # not a weekday + assert not freq.is_month_start(ts) + assert freq.is_month_start(ts + Timedelta(days=1)) + assert not freq.is_quarter_start(ts) + assert freq.is_quarter_start(ts + Timedelta(days=1)) + + ts = Timestamp("2017-09-30") + assert ts.dayofweek == 5 + assert ts.day_of_week == 5 + assert ts.is_month_end + assert not freq.is_month_end(ts) + assert freq.is_month_end(ts - Timedelta(days=1)) + assert ts.is_quarter_end + assert not freq.is_quarter_end(ts) + assert freq.is_quarter_end(ts - Timedelta(days=1)) + + @pytest.mark.parametrize( + "attr, expected", + [ + ["year", 2014], + ["month", 12], + ["day", 31], + ["hour", 23], + ["minute", 59], + ["second", 0], + ["microsecond", 0], + ["nanosecond", 0], + ["dayofweek", 2], + ["day_of_week", 2], + ["quarter", 4], + ["dayofyear", 365], + ["day_of_year", 365], + ["week", 1], + ["daysinmonth", 31], + ], + ) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_fields(self, attr, expected, tz): + # GH 10050 + # GH 13303 + ts = Timestamp("2014-12-31 23:59:00", tz=tz) + result = getattr(ts, attr) + # that we are int like + assert isinstance(result, int) + assert result == expected + + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_millisecond_raises(self, tz): + ts = Timestamp("2014-12-31 23:59:00", tz=tz) + msg = "'Timestamp' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + ts.millisecond + + @pytest.mark.parametrize( + "start", ["is_month_start", "is_quarter_start", "is_year_start"] + ) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) 
+ def test_is_start(self, start, tz): + ts = Timestamp("2014-01-01 00:00:00", tz=tz) + assert getattr(ts, start) + + @pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"]) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_is_end(self, end, tz): + ts = Timestamp("2014-12-31 23:59:59", tz=tz) + assert getattr(ts, end) + + # GH 12806 + @pytest.mark.parametrize( + "data", + [Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")], + ) + # error: Unsupported operand types for + ("List[None]" and "List[str]") + @pytest.mark.parametrize( + "time_locale", [None] + tm.get_locales() # type: ignore[operator] + ) + def test_names(self, data, time_locale): + # GH 17354 + # Test .day_name(), .month_name + if time_locale is None: + expected_day = "Monday" + expected_month = "August" + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_day = calendar.day_name[0].capitalize() + expected_month = calendar.month_name[8].capitalize() + + result_day = data.day_name(time_locale) + result_month = data.month_name(time_locale) + + # Work around https://github.com/pandas-dev/pandas/issues/22342 + # different normalizations + expected_day = unicodedata.normalize("NFD", expected_day) + expected_month = unicodedata.normalize("NFD", expected_month) + + result_day = unicodedata.normalize("NFD", result_day) + result_month = unicodedata.normalize("NFD", result_month) + + assert result_day == expected_day + assert result_month == expected_month + + # Test NaT + nan_ts = Timestamp(NaT) + assert np.isnan(nan_ts.day_name(time_locale)) + assert np.isnan(nan_ts.month_name(time_locale)) + + def test_is_leap_year(self, tz_naive_fixture): + tz = tz_naive_fixture + if not IS64 and tz == tzlocal(): + # https://github.com/dateutil/dateutil/issues/197 + pytest.skip( + "tzlocal() on a 32 bit platform causes internal overflow errors" + ) + # GH 13727 + dt = Timestamp("2000-01-01 00:00:00", tz=tz) + assert dt.is_leap_year + assert isinstance(dt.is_leap_year, bool) + + dt = Timestamp("1999-01-01 00:00:00", tz=tz) + assert not dt.is_leap_year + + dt = Timestamp("2004-01-01 00:00:00", tz=tz) + assert dt.is_leap_year + + dt = Timestamp("2100-01-01 00:00:00", tz=tz) + assert not dt.is_leap_year + + def test_woy_boundary(self): + # make sure weeks at year boundaries are correct + d = datetime(2013, 12, 31) + result = Timestamp(d).week + expected = 1 # ISO standard + assert result == expected + + d = datetime(2008, 12, 28) + result = Timestamp(d).week + expected = 52 # ISO standard + assert result == expected + + d = datetime(2009, 12, 31) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + d = datetime(2010, 1, 1) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + d = datetime(2010, 1, 3) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + result = np.array( + [ + Timestamp(datetime(*args)).week + for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)] + ] + ) + assert (result == [52, 52, 53, 53]).all() + + def test_resolution(self): + # GH#21336, GH#21365 + dt = Timestamp("2100-01-01 00:00:00.000000000") + assert dt.resolution == Timedelta(nanoseconds=1) + + # Check that the attribute is available on the class, mirroring + # the stdlib datetime behavior + assert Timestamp.resolution == Timedelta(nanoseconds=1) + + assert dt.as_unit("us").resolution == Timedelta(microseconds=1) + assert dt.as_unit("ms").resolution == 
Timedelta(milliseconds=1) + assert dt.as_unit("s").resolution == Timedelta(seconds=1) + + @pytest.mark.parametrize( + "date_string, expected", + [ + ("0000-2-29", 1), + ("0000-3-1", 2), + ("1582-10-14", 3), + ("-0040-1-1", 4), + ("2023-06-18", 6), + ], + ) + def test_dow_historic(self, date_string, expected): + # GH 53738 + ts = Timestamp(date_string) + dow = ts.weekday() + assert dow == expected + + @given( + ts=st.datetimes(), + sign=st.sampled_from(["-", ""]), + ) + def test_dow_parametric(self, ts, sign): + # GH 53738 + ts = ( + f"{sign}{str(ts.year).zfill(4)}" + f"-{str(ts.month).zfill(2)}" + f"-{str(ts.day).zfill(2)}" + ) + result = Timestamp(ts).weekday() + expected = ( + (np.datetime64(ts) - np.datetime64("1970-01-01")).astype("int64") - 4 + ) % 7 + assert result == expected + + +class TestTimestamp: + @pytest.mark.parametrize("tz", [None, pytz.timezone("US/Pacific")]) + def test_disallow_setting_tz(self, tz): + # GH#3746 + ts = Timestamp("2010") + msg = "Cannot directly set timezone" + with pytest.raises(AttributeError, match=msg): + ts.tz = tz + + def test_default_to_stdlib_utc(self): + assert Timestamp.utcnow().tz is timezone.utc + assert Timestamp.now("UTC").tz is timezone.utc + assert Timestamp("2016-01-01", tz="UTC").tz is timezone.utc + + def test_tz(self): + tstr = "2014-02-01 09:00" + ts = Timestamp(tstr) + local = ts.tz_localize("Asia/Tokyo") + assert local.hour == 9 + assert local == Timestamp(tstr, tz="Asia/Tokyo") + conv = local.tz_convert("US/Eastern") + assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern") + assert conv.hour == 19 + + # preserves nanosecond + ts = Timestamp(tstr) + offsets.Nano(5) + local = ts.tz_localize("Asia/Tokyo") + assert local.hour == 9 + assert local.nanosecond == 5 + conv = local.tz_convert("US/Eastern") + assert conv.nanosecond == 5 + assert conv.hour == 19 + + def test_utc_z_designator(self): + assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is timezone.utc + + def test_asm8(self): + ns = [Timestamp.min._value, Timestamp.max._value, 1000] + + for n in ns: + assert ( + Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n + ) + + assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8") + + def test_class_ops(self): + def compare(x, y): + assert int((Timestamp(x)._value - Timestamp(y)._value) / 1e9) == 0 + + compare(Timestamp.now(), datetime.now()) + compare(Timestamp.now("UTC"), datetime.now(pytz.timezone("UTC"))) + compare(Timestamp.now("UTC"), datetime.now(tzutc())) + compare(Timestamp.utcnow(), datetime.now(timezone.utc)) + compare(Timestamp.today(), datetime.today()) + current_time = calendar.timegm(datetime.now().utctimetuple()) + + ts_utc = Timestamp.utcfromtimestamp(current_time) + assert ts_utc.timestamp() == current_time + compare( + Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time) + ) + compare( + # Support tz kwarg in Timestamp.fromtimestamp + Timestamp.fromtimestamp(current_time, "UTC"), + datetime.fromtimestamp(current_time, utc), + ) + compare( + # Support tz kwarg in Timestamp.fromtimestamp + Timestamp.fromtimestamp(current_time, tz="UTC"), + datetime.fromtimestamp(current_time, utc), + ) + + date_component = datetime.now(timezone.utc) + time_component = (date_component + timedelta(minutes=10)).time() + compare( + Timestamp.combine(date_component, time_component), + datetime.combine(date_component, time_component), + ) + + def test_basics_nanos(self): + val = np.int64(946_684_800_000_000_000).view("M8[ns]") + stamp = 
Timestamp(val.view("i8") + 500) + assert stamp.year == 2000 + assert stamp.month == 1 + assert stamp.microsecond == 0 + assert stamp.nanosecond == 500 + + # GH 14415 + val = np.iinfo(np.int64).min + 80_000_000_000_000 + stamp = Timestamp(val) + assert stamp.year == 1677 + assert stamp.month == 9 + assert stamp.day == 21 + assert stamp.microsecond == 145224 + assert stamp.nanosecond == 192 + + def test_roundtrip(self): + # test value to string and back conversions + # further test accessors + base = Timestamp("20140101 00:00:00").as_unit("ns") + + result = Timestamp(base._value + Timedelta("5ms")._value) + assert result == Timestamp(f"{base}.005000") + assert result.microsecond == 5000 + + result = Timestamp(base._value + Timedelta("5us")._value) + assert result == Timestamp(f"{base}.000005") + assert result.microsecond == 5 + + result = Timestamp(base._value + Timedelta("5ns")._value) + assert result == Timestamp(f"{base}.000000005") + assert result.nanosecond == 5 + assert result.microsecond == 0 + + result = Timestamp(base._value + Timedelta("6ms 5us")._value) + assert result == Timestamp(f"{base}.006005") + assert result.microsecond == 5 + 6 * 1000 + + result = Timestamp(base._value + Timedelta("200ms 5us")._value) + assert result == Timestamp(f"{base}.200005") + assert result.microsecond == 5 + 200 * 1000 + + def test_hash_equivalent(self): + d = {datetime(2011, 1, 1): 5} + stamp = Timestamp(datetime(2011, 1, 1)) + assert d[stamp] == 5 + + @pytest.mark.parametrize( + "timezone, year, month, day, hour", + [["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]], + ) + def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour): + # see gh-33931 + test_timezone = gettz(timezone) + transition_1 = Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=0, + tzinfo=test_timezone, + ) + transition_2 = Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=1, + tzinfo=test_timezone, + ) + assert hash(transition_1) == hash(transition_2) + + +class TestTimestampNsOperations: + def test_nanosecond_string_parsing(self): + ts = Timestamp("2013-05-01 07:15:45.123456789") + # GH 7878 + expected_repr = "2013-05-01 07:15:45.123456789" + expected_value = 1_367_392_545_123_456_789 + assert ts._value == expected_value + assert expected_repr in repr(ts) + + ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo") + assert ts._value == expected_value - 9 * 3600 * 1_000_000_000 + assert expected_repr in repr(ts) + + ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC") + assert ts._value == expected_value + assert expected_repr in repr(ts) + + ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern") + assert ts._value == expected_value + 4 * 3600 * 1_000_000_000 + assert expected_repr in repr(ts) + + # GH 10041 + ts = Timestamp("20130501T071545.123456789") + assert ts._value == expected_value + assert expected_repr in repr(ts) + + def test_nanosecond_timestamp(self): + # GH 7610 + expected = 1_293_840_000_000_000_005 + t = Timestamp("2011-01-01") + offsets.Nano(5) + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')" + assert t._value == expected + assert t.nanosecond == 5 + + t = Timestamp(t) + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')" + assert t._value == expected + assert t.nanosecond == 5 + + t = Timestamp("2011-01-01 00:00:00.000000005") + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')" + assert t._value == expected + assert t.nanosecond == 5 + + 
expected = 1_293_840_000_000_000_010 + t = t + offsets.Nano(5) + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')" + assert t._value == expected + assert t.nanosecond == 10 + + t = Timestamp(t) + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')" + assert t._value == expected + assert t.nanosecond == 10 + + t = Timestamp("2011-01-01 00:00:00.000000010") + assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')" + assert t._value == expected + assert t.nanosecond == 10 + + +class TestTimestampConversion: + def test_conversion(self): + # GH#9255 + ts = Timestamp("2000-01-01").as_unit("ns") + + result = ts.to_pydatetime() + expected = datetime(2000, 1, 1) + assert result == expected + assert type(result) == type(expected) + + result = ts.to_datetime64() + expected = np.datetime64(ts._value, "ns") + assert result == expected + assert type(result) == type(expected) + assert result.dtype == expected.dtype + + def test_to_period_tz_warning(self): + # GH#21333 make sure a warning is issued when timezone + # info is lost + ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern") + with tm.assert_produces_warning(UserWarning): + # warning that timezone info will be lost + ts.to_period("D") + + def test_to_numpy_alias(self): + # GH 24653: alias .to_numpy() for scalars + ts = Timestamp(datetime.now()) + assert ts.to_datetime64() == ts.to_numpy() + + # GH#44460 + msg = "dtype and copy arguments are ignored" + with pytest.raises(ValueError, match=msg): + ts.to_numpy("M8[s]") + with pytest.raises(ValueError, match=msg): + ts.to_numpy(copy=True) + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def reso(self, request): + return request.param + + @pytest.fixture + def dt64(self, reso): + # cases that are in-bounds for nanosecond, so we can compare against + # the existing implementation. 
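+        # (2016-01-01 is representable at "s", "ms", and "us" alike, so each
+        #  non-nano Timestamp can be checked directly against Timestamp(dt64))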
+ return np.datetime64("2016-01-01", reso) + + @pytest.fixture + def ts(self, dt64): + return Timestamp._from_dt64(dt64) + + @pytest.fixture + def ts_tz(self, ts, tz_aware_fixture): + tz = maybe_get_tz(tz_aware_fixture) + return Timestamp._from_value_and_reso(ts._value, ts._creso, tz) + + def test_non_nano_construction(self, dt64, ts, reso): + assert ts._value == dt64.view("i8") + + if reso == "s": + assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value + elif reso == "ms": + assert ts._creso == NpyDatetimeUnit.NPY_FR_ms.value + elif reso == "us": + assert ts._creso == NpyDatetimeUnit.NPY_FR_us.value + + def test_non_nano_fields(self, dt64, ts): + alt = Timestamp(dt64) + + assert ts.year == alt.year + assert ts.month == alt.month + assert ts.day == alt.day + assert ts.hour == ts.minute == ts.second == ts.microsecond == 0 + assert ts.nanosecond == 0 + + assert ts.to_julian_date() == alt.to_julian_date() + assert ts.weekday() == alt.weekday() + assert ts.isoweekday() == alt.isoweekday() + + def test_start_end_fields(self, ts): + assert ts.is_year_start + assert ts.is_quarter_start + assert ts.is_month_start + assert not ts.is_year_end + assert not ts.is_month_end + assert not ts.is_month_end + + # 2016-01-01 is a Friday, so is year/quarter/month start with this freq + assert ts.is_year_start + assert ts.is_quarter_start + assert ts.is_month_start + assert not ts.is_year_end + assert not ts.is_month_end + assert not ts.is_month_end + + def test_day_name(self, dt64, ts): + alt = Timestamp(dt64) + assert ts.day_name() == alt.day_name() + + def test_month_name(self, dt64, ts): + alt = Timestamp(dt64) + assert ts.month_name() == alt.month_name() + + def test_tz_convert(self, ts): + ts = Timestamp._from_value_and_reso(ts._value, ts._creso, utc) + + tz = pytz.timezone("US/Pacific") + result = ts.tz_convert(tz) + + assert isinstance(result, Timestamp) + assert result._creso == ts._creso + assert tz_compare(result.tz, tz) + + def test_repr(self, dt64, ts): + alt = Timestamp(dt64) + + assert str(ts) == str(alt) + assert repr(ts) == repr(alt) + + def test_comparison(self, dt64, ts): + alt = Timestamp(dt64) + + assert ts == dt64 + assert dt64 == ts + assert ts == alt + assert alt == ts + + assert not ts != dt64 + assert not dt64 != ts + assert not ts != alt + assert not alt != ts + + assert not ts < dt64 + assert not dt64 < ts + assert not ts < alt + assert not alt < ts + + assert not ts > dt64 + assert not dt64 > ts + assert not ts > alt + assert not alt > ts + + assert ts >= dt64 + assert dt64 >= ts + assert ts >= alt + assert alt >= ts + + assert ts <= dt64 + assert dt64 <= ts + assert ts <= alt + assert alt <= ts + + def test_cmp_cross_reso(self): + # numpy gets this wrong because of silent overflow + dt64 = np.datetime64(9223372800, "s") # won't fit in M8[ns] + ts = Timestamp._from_dt64(dt64) + + # subtracting 3600*24 gives a datetime64 that _can_ fit inside the + # nanosecond implementation bounds. 
+ other = Timestamp(dt64 - 3600 * 24).as_unit("ns") + assert other < ts + assert other.asm8 > ts.asm8 # <- numpy gets this wrong + assert ts > other + assert ts.asm8 < other.asm8 # <- numpy gets this wrong + assert not other == ts + assert ts != other + + @pytest.mark.xfail(reason="Dispatches to np.datetime64 which is wrong") + def test_cmp_cross_reso_reversed_dt64(self): + dt64 = np.datetime64(106752, "D") # won't fit in M8[ns] + ts = Timestamp._from_dt64(dt64) + other = Timestamp(dt64 - 1) + + assert other.asm8 < ts + + def test_pickle(self, ts, tz_aware_fixture): + tz = tz_aware_fixture + tz = maybe_get_tz(tz) + ts = Timestamp._from_value_and_reso(ts._value, ts._creso, tz) + rt = tm.round_trip_pickle(ts) + assert rt._creso == ts._creso + assert rt == ts + + def test_normalize(self, dt64, ts): + alt = Timestamp(dt64) + result = ts.normalize() + assert result._creso == ts._creso + assert result == alt.normalize() + + def test_asm8(self, dt64, ts): + rt = ts.asm8 + assert rt == dt64 + assert rt.dtype == dt64.dtype + + def test_to_numpy(self, dt64, ts): + res = ts.to_numpy() + assert res == dt64 + assert res.dtype == dt64.dtype + + def test_to_datetime64(self, dt64, ts): + res = ts.to_datetime64() + assert res == dt64 + assert res.dtype == dt64.dtype + + def test_timestamp(self, dt64, ts): + alt = Timestamp(dt64) + assert ts.timestamp() == alt.timestamp() + + def test_to_period(self, dt64, ts): + alt = Timestamp(dt64) + assert ts.to_period("D") == alt.to_period("D") + + @pytest.mark.parametrize( + "td", [timedelta(days=4), Timedelta(days=4), np.timedelta64(4, "D")] + ) + def test_addsub_timedeltalike_non_nano(self, dt64, ts, td): + exp_reso = max(ts._creso, Timedelta(td)._creso) + + result = ts - td + expected = Timestamp(dt64) - td + assert isinstance(result, Timestamp) + assert result._creso == exp_reso + assert result == expected + + result = ts + td + expected = Timestamp(dt64) + td + assert isinstance(result, Timestamp) + assert result._creso == exp_reso + assert result == expected + + result = td + ts + expected = td + Timestamp(dt64) + assert isinstance(result, Timestamp) + assert result._creso == exp_reso + assert result == expected + + def test_addsub_offset(self, ts_tz): + # specifically non-Tick offset + off = offsets.YearEnd(1) + result = ts_tz + off + + assert isinstance(result, Timestamp) + assert result._creso == ts_tz._creso + if ts_tz.month == 12 and ts_tz.day == 31: + assert result.year == ts_tz.year + 1 + else: + assert result.year == ts_tz.year + assert result.day == 31 + assert result.month == 12 + assert tz_compare(result.tz, ts_tz.tz) + + result = ts_tz - off + + assert isinstance(result, Timestamp) + assert result._creso == ts_tz._creso + assert result.year == ts_tz.year - 1 + assert result.day == 31 + assert result.month == 12 + assert tz_compare(result.tz, ts_tz.tz) + + def test_sub_datetimelike_mismatched_reso(self, ts_tz): + # case with non-lossy rounding + ts = ts_tz + + # choose a unit for `other` that doesn't match ts_tz's; + # this construction ensures we get cases with other._creso < ts._creso + # and cases with other._creso > ts._creso + unit = { + NpyDatetimeUnit.NPY_FR_us.value: "ms", + NpyDatetimeUnit.NPY_FR_ms.value: "s", + NpyDatetimeUnit.NPY_FR_s.value: "us", + }[ts._creso] + other = ts.as_unit(unit) + assert other._creso != ts._creso + + result = ts - other + assert isinstance(result, Timedelta) + assert result._value == 0 + assert result._creso == max(ts._creso, other._creso) + + result = other - ts + assert isinstance(result, Timedelta) + assert 
result._value == 0 + assert result._creso == max(ts._creso, other._creso) + + if ts._creso < other._creso: + # Case where rounding is lossy + other2 = other + Timedelta._from_value_and_reso(1, other._creso) + exp = ts.as_unit(other.unit) - other2 + + res = ts - other2 + assert res == exp + assert res._creso == max(ts._creso, other._creso) + + res = other2 - ts + assert res == -exp + assert res._creso == max(ts._creso, other._creso) + else: + ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso) + exp = ts2 - other.as_unit(ts2.unit) + + res = ts2 - other + assert res == exp + assert res._creso == max(ts._creso, other._creso) + res = other - ts2 + assert res == -exp + assert res._creso == max(ts._creso, other._creso) + + def test_sub_timedeltalike_mismatched_reso(self, ts_tz): + # case with non-lossy rounding + ts = ts_tz + + # choose a unit for `other` that doesn't match ts_tz's; + # this construction ensures we get cases with other._creso < ts._creso + # and cases with other._creso > ts._creso + unit = { + NpyDatetimeUnit.NPY_FR_us.value: "ms", + NpyDatetimeUnit.NPY_FR_ms.value: "s", + NpyDatetimeUnit.NPY_FR_s.value: "us", + }[ts._creso] + other = Timedelta(0).as_unit(unit) + assert other._creso != ts._creso + + result = ts + other + assert isinstance(result, Timestamp) + assert result == ts + assert result._creso == max(ts._creso, other._creso) + + result = other + ts + assert isinstance(result, Timestamp) + assert result == ts + assert result._creso == max(ts._creso, other._creso) + + if ts._creso < other._creso: + # Case where rounding is lossy + other2 = other + Timedelta._from_value_and_reso(1, other._creso) + exp = ts.as_unit(other.unit) + other2 + res = ts + other2 + assert res == exp + assert res._creso == max(ts._creso, other._creso) + res = other2 + ts + assert res == exp + assert res._creso == max(ts._creso, other._creso) + else: + ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso) + exp = ts2 + other.as_unit(ts2.unit) + + res = ts2 + other + assert res == exp + assert res._creso == max(ts._creso, other._creso) + res = other + ts2 + assert res == exp + assert res._creso == max(ts._creso, other._creso) + + def test_addition_doesnt_downcast_reso(self): + # https://github.com/pandas-dev/pandas/pull/48748#pullrequestreview-1122635413 + ts = Timestamp(year=2022, month=1, day=1, microsecond=999999).as_unit("us") + td = Timedelta(microseconds=1).as_unit("us") + res = ts + td + assert res._creso == ts._creso + + def test_sub_timedelta64_mismatched_reso(self, ts_tz): + ts = ts_tz + + res = ts + np.timedelta64(1, "ns") + exp = ts.as_unit("ns") + np.timedelta64(1, "ns") + assert exp == res + assert exp._creso == NpyDatetimeUnit.NPY_FR_ns.value + + def test_min(self, ts): + assert ts.min <= ts + assert ts.min._creso == ts._creso + assert ts.min._value == NaT._value + 1 + + def test_max(self, ts): + assert ts.max >= ts + assert ts.max._creso == ts._creso + assert ts.max._value == np.iinfo(np.int64).max + + def test_resolution(self, ts): + expected = Timedelta._from_value_and_reso(1, ts._creso) + result = ts.resolution + assert result == expected + assert result._creso == expected._creso + + def test_out_of_ns_bounds(self): + # https://github.com/pandas-dev/pandas/issues/51060 + result = Timestamp(-52700112000, unit="s") + assert result == Timestamp("0300-01-01") + assert result.to_numpy() == np.datetime64("0300-01-01T00:00:00", "s") + + +def test_timestamp_class_min_max_resolution(): + # when accessed on the class (as opposed to an instance), we default + # to nanoseconds + assert 
Timestamp.min == Timestamp(NaT._value + 1)
+    assert Timestamp.min._creso == NpyDatetimeUnit.NPY_FR_ns.value
+
+    assert Timestamp.max == Timestamp(np.iinfo(np.int64).max)
+    assert Timestamp.max._creso == NpyDatetimeUnit.NPY_FR_ns.value
+
+    assert Timestamp.resolution == Timedelta(1)
+    assert Timestamp.resolution._creso == NpyDatetimeUnit.NPY_FR_ns.value
+
+
+def test_delimited_date():
+    # https://github.com/pandas-dev/pandas/issues/50231
+    with tm.assert_produces_warning(None):
+        result = Timestamp("13-01-2000")
+    expected = Timestamp(2000, 1, 13)
+    assert result == expected
+
+
+def test_utctimetuple():
+    # GH 32174
+    ts = Timestamp("2000-01-01", tz="UTC")
+    result = ts.utctimetuple()
+    expected = time.struct_time((2000, 1, 1, 0, 0, 0, 5, 1, 0))
+    assert result == expected
+
+
+def test_negative_dates():
+    # https://github.com/pandas-dev/pandas/issues/50787
+    ts = Timestamp("-2000-01-01")
+    msg = (
+        " not yet supported on Timestamps which are outside the range of "
+        "Python's standard library. For now, please call the components you need "
+        r"\(such as `.year` and `.month`\) and construct your string from there.$"
+    )
+    func = "^strftime"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.strftime("%Y")
+
+    msg = (
+        " not yet supported on Timestamps which "
+        "are outside the range of Python's standard library. "
+    )
+    func = "^date"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.date()
+    func = "^isocalendar"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.isocalendar()
+    func = "^timetuple"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.timetuple()
+    func = "^toordinal"
+    with pytest.raises(NotImplementedError, match=func + msg):
+        ts.toordinal()
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timezones.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timezones.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb2a35be907cdca8d6777b708623c50288d6cca7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/test_timezones.py
@@ -0,0 +1,24 @@
+"""
+Tests for Timestamp timezone-related methods
+"""
+from datetime import datetime
+
+from pandas._libs.tslibs import timezones
+
+from pandas import Timestamp
+
+
+class TestTimestampTZOperations:
+    # ------------------------------------------------------------------
+
+    def test_timestamp_timetz_equivalent_with_datetime_tz(self, tz_naive_fixture):
+        # GH21358
+        tz = timezones.maybe_get_tz(tz_naive_fixture)
+
+        stamp = Timestamp("2018-06-04 10:20:30", tz=tz)
+        _datetime = datetime(2018, 6, 4, hour=10, minute=20, second=30, tzinfo=tz)
+
+        result = stamp.timetz()
+        expected = _datetime.timetz()
+
+        assert result == expected
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_aggregation.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7695c953712ed9925e4e804d0db1e8cf606a97eb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pytest
+
+from pandas.core.apply import (
+    _make_unique_kwarg_list,
+    maybe_mangle_lambdas,
+)
+
+
+def test_maybe_mangle_lambdas_passthrough():
+    assert maybe_mangle_lambdas("mean") == "mean"
+    assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
+    # don't mangle a single lambda.
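+    # (pandas only renames lambdas when a list supplies two or more of them;
+    #  the positional "<lambda_0>", "<lambda_1>", ... names appear below)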
+    assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
+
+
+def test_maybe_mangle_lambdas_listlike():
+    aggfuncs = [lambda x: 1, lambda x: 2]
+    result = maybe_mangle_lambdas(aggfuncs)
+    assert result[0].__name__ == "<lambda_0>"
+    assert result[1].__name__ == "<lambda_1>"
+    assert aggfuncs[0](None) == result[0](None)
+    assert aggfuncs[1](None) == result[1](None)
+
+
+def test_maybe_mangle_lambdas():
+    func = {"A": [lambda x: 0, lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+
+def test_maybe_mangle_lambdas_args():
+    func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+    assert func["A"][0](0, 1) == (0, 1, 1)
+    assert func["A"][0](0, 1, 2) == (0, 1, 2)
+    assert func["A"][0](0, 2, b=3) == (0, 2, 3)
+
+
+def test_maybe_mangle_lambdas_named():
+    func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
+    result = maybe_mangle_lambdas(func)
+    assert result == func
+
+
+@pytest.mark.parametrize(
+    "order, expected_reorder",
+    [
+        (
+            [
+                ("height", "<lambda>"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>"),
+                ("weight", "<lambda>"),
+            ],
+            [
+                ("height", "<lambda>_0"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>_1"),
+                ("weight", "<lambda>"),
+            ],
+        ),
+        (
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+            ],
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>_0"),
+                ("col1", "<lambda>_1"),
+                ("col1", "<lambda>_2"),
+            ],
+        ),
+        (
+            [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+            [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+        ),
+    ],
+)
+def test_make_unique(order, expected_reorder):
+    # GH 27519, test if make_unique function reorders correctly
+    result = _make_unique_kwarg_list(order)
+
+    assert result == expected_reorder
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_downstream.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_downstream.py
new file mode 100644
index 0000000000000000000000000000000000000000..51ce73ef54300c5d2f9be4e3988de77843a61a8e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_downstream.py
@@ -0,0 +1,362 @@
+"""
+Testing that we work in the downstream packages
+"""
+import array
+import subprocess
+import sys
+
+import numpy as np
+import pytest
+
+from pandas.errors import IntCastingNaNError
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    DatetimeIndex,
+    Series,
+    TimedeltaIndex,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+    DatetimeArray,
+    TimedeltaArray,
+)
+
+
+@pytest.fixture
+def df():
+    return DataFrame({"A": [1, 2, 3]})
+
+
+def test_dask(df):
+    # dask sets "compute.use_numexpr" to False, so catch the current value
+    # and ensure to reset it afterwards to avoid impacting other tests
+    olduse = pd.get_option("compute.use_numexpr")
+
+    try:
+        pytest.importorskip("toolz")
+        dd = pytest.importorskip("dask.dataframe")
+
+        ddf = dd.from_pandas(df, npartitions=3)
+        assert ddf.A is not None
+        assert ddf.compute() is not None
+    finally:
+        pd.set_option("compute.use_numexpr", olduse)
+
+
+def test_dask_ufunc():
+    # dask sets "compute.use_numexpr" to False, so catch the current value
+    # and ensure to reset it afterwards to avoid impacting other tests
+    olduse = pd.get_option("compute.use_numexpr")
+
+    try:
+        da = pytest.importorskip("dask.array")
+        dd = pytest.importorskip("dask.dataframe")
+
+        s = Series([1.5, 2.3, 3.7, 4.0])
+        ds = dd.from_pandas(s,
npartitions=2) + + result = da.fix(ds).compute() + expected = np.fix(s) + tm.assert_series_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_construct_dask_float_array_int_dtype_match_ndarray(): + # GH#40110 make sure we treat a float-dtype dask array with the same + # rules we would for an ndarray + dd = pytest.importorskip("dask.dataframe") + + arr = np.array([1, 2.5, 3]) + darr = dd.from_array(arr) + + res = Series(darr) + expected = Series(arr) + tm.assert_series_equal(res, expected) + + # GH#49599 in 2.0 we raise instead of silently ignoring the dtype + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Series(darr, dtype="i8") + + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + arr[2] = np.nan + with pytest.raises(IntCastingNaNError, match=msg): + Series(darr, dtype="i8") + # which is the same as we get with a numpy input + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr, dtype="i8") + + +def test_xarray(df): + pytest.importorskip("xarray") + + assert df.to_xarray() is not None + + +def test_xarray_cftimeindex_nearest(): + # https://github.com/pydata/xarray/issues/3751 + cftime = pytest.importorskip("cftime") + xarray = pytest.importorskip("xarray") + + times = xarray.cftime_range("0001", periods=2) + key = cftime.DatetimeGregorian(2000, 1, 1) + result = times.get_indexer([key], method="nearest") + expected = 1 + assert result == expected + + +@pytest.mark.single_cpu +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) + + +@pytest.mark.single_cpu +def test_oo_optimized_datetime_index_unpickle(): + # GH 42866 + subprocess.check_call( + [ + sys.executable, + "-OO", + "-c", + ( + "import pandas as pd, pickle; " + "pickle.loads(pickle.dumps(pd.date_range('2021-01-01', periods=1)))" + ), + ] + ) + + +def test_statsmodels(): + smf = pytest.importorskip("statsmodels.formula.api") + + df = DataFrame( + {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)} + ) + smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit() + + +def test_scikit_learn(): + pytest.importorskip("sklearn") + from sklearn import ( + datasets, + svm, + ) + + digits = datasets.load_digits() + clf = svm.SVC(gamma=0.001, C=100.0) + clf.fit(digits.data[:-1], digits.target[:-1]) + clf.predict(digits.data[-1:]) + + +def test_seaborn(): + seaborn = pytest.importorskip("seaborn") + tips = DataFrame( + {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)} + ) + seaborn.stripplot(x="day", y="total_bill", data=tips) + + +def test_pandas_datareader(): + pytest.importorskip("pandas_datareader") + + +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") +def test_pyarrow(df): + pyarrow = pytest.importorskip("pyarrow") + table = pyarrow.Table.from_pandas(df) + result = table.to_pandas() + tm.assert_frame_equal(result, df) + + +def test_yaml_dump(df): + # GH#42748 + yaml = pytest.importorskip("yaml") + + dumped = yaml.dump(df) + + loaded = yaml.load(dumped, Loader=yaml.Loader) + tm.assert_frame_equal(df, loaded) + + loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader) + tm.assert_frame_equal(df, loaded2) + + +@pytest.mark.single_cpu +def test_missing_required_dependency(): + # GH 23868 + # To ensure proper isolation, we pass these flags + # -S : disable site-packages + # -s : disable user site-packages + # -E : disable PYTHON* env vars, especially PYTHONPATH + # 
https://github.com/MacPython/pandas-wheels/pull/50 + + pyexe = sys.executable.replace("\\", "/") + + # We skip this test if pandas is installed as a site package. We first + # import the package normally and check the path to the module before + # executing the test which imports pandas with site packages disabled. + call = [pyexe, "-c", "import pandas;print(pandas.__file__)"] + output = subprocess.check_output(call).decode() + if "site-packages" in output: + pytest.skip("pandas installed as site package") + + # This test will fail if pandas is installed as a site package. The flags + # prevent pandas being imported and the test will report Failed: DID NOT + # RAISE + call = [pyexe, "-sSE", "-c", "import pandas"] + + msg = ( + rf"Command '\['{pyexe}', '-sSE', '-c', 'import pandas'\]' " + "returned non-zero exit status 1." + ) + + with pytest.raises(subprocess.CalledProcessError, match=msg) as exc: + subprocess.check_output(call, stderr=subprocess.STDOUT) + + output = exc.value.stdout.decode() + for name in ["numpy", "pytz", "dateutil"]: + assert name in output + + +def test_frame_setitem_dask_array_into_new_col(): + # GH#47128 + + # dask sets "compute.use_numexpr" to False, so catch the current value + # and ensure to reset it afterwards to avoid impacting other tests + olduse = pd.get_option("compute.use_numexpr") + + try: + da = pytest.importorskip("dask.array") + + dda = da.array([1, 2]) + df = DataFrame({"a": ["a", "b"]}) + df["b"] = dda + df["c"] = dda + df.loc[[False, True], "b"] = 100 + result = df.loc[[1], :] + expected = DataFrame({"a": ["b"], "b": [100], "c": [2]}, index=[1]) + tm.assert_frame_equal(result, expected) + finally: + pd.set_option("compute.use_numexpr", olduse) + + +def test_pandas_priority(): + # GH#48347 + + class MyClass: + __pandas_priority__ = 5000 + + def __radd__(self, other): + return self + + left = MyClass() + right = Series(range(3)) + + assert right.__add__(left) is NotImplemented + assert right + left is left + + +@pytest.fixture( + params=[ + "memoryview", + "array", + pytest.param("dask", marks=td.skip_if_no("dask.array")), + pytest.param("xarray", marks=td.skip_if_no("xarray")), + ] +) +def array_likes(request): + """ + Fixture giving a numpy array and a parametrized 'data' object, which can + be a memoryview, array, dask or xarray object created from the numpy array. + """ + # GH#24539 recognize e.g xarray, dask, ... + arr = np.array([1, 2, 3], dtype=np.int64) + + name = request.param + if name == "memoryview": + data = memoryview(arr) + elif name == "array": + data = array.array("i", arr) + elif name == "dask": + import dask.array + + data = dask.array.array(arr) + elif name == "xarray": + import xarray as xr + + data = xr.DataArray(arr) + + return arr, data + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_obscure_array(dtype, array_likes): + # GH#24539 recognize e.g xarray, dask, ... 
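+    # (the array_likes fixture pairs a plain int64 ndarray with the same data
+    #  wrapped as a memoryview, array.array, dask or xarray object; both forms
+    #  should construct equal arrays below)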
+    # Note: we don't do this for PeriodArray because _from_sequence won't accept
+    # an array of integers
+    # TODO: could check with arraylike of Period objects
+    arr, data = array_likes
+
+    cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype]
+
+    depr_msg = f"{cls.__name__}.__init__ is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+        expected = cls(arr)
+    result = cls._from_sequence(data, dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+    if not isinstance(data, memoryview):
+        # FIXME(GH#44431) these raise on memoryview and attempted fix
+        # fails on py3.10
+        func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]
+        result = func(arr).array
+        expected = func(data).array
+        tm.assert_equal(result, expected)
+
+    # Let's check the Indexes while we're here
+    idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]
+    result = idx_cls(arr)
+    expected = idx_cls(data)
+    tm.assert_index_equal(result, expected)
+
+
+def test_dataframe_consortium() -> None:
+    """
+    Test some basic methods of the dataframe consortium standard.
+
+    Full testing is done at https://github.com/data-apis/dataframe-api-compat,
+    this is just to check that the entry point works as expected.
+    """
+    pytest.importorskip("dataframe_api_compat")
+    df_pd = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+    df = df_pd.__dataframe_consortium_standard__()
+    result_1 = df.get_column_names()
+    expected_1 = ["a", "b"]
+    assert result_1 == expected_1
+
+    ser = Series([1, 2, 3], name="a")
+    col = ser.__column_consortium_standard__()
+    assert col.name == "a"
+
+
+def test_xarray_coerce_unit():
+    # GH44053
+    xr = pytest.importorskip("xarray")
+
+    arr = xr.DataArray([1, 2, 3])
+    result = pd.to_datetime(arr, unit="ns")
+    expected = DatetimeIndex(
+        [
+            "1970-01-01 00:00:00.000000001",
+            "1970-01-01 00:00:00.000000002",
+            "1970-01-01 00:00:00.000000003",
+        ],
+        dtype="datetime64[ns]",
+        freq=None,
+    )
+    tm.assert_index_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_multilevel.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_multilevel.py
new file mode 100644
index 0000000000000000000000000000000000000000..6644ec82fab17ac9e1c1744b595c38fda17114f5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_multilevel.py
@@ -0,0 +1,355 @@
+import datetime
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    Series,
+)
+import pandas._testing as tm
+
+
+class TestMultiLevel:
+    def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data):
+        # axis=0
+        ymd = multiindex_year_month_day_dataframe_random_data
+
+        month_sums = ymd.groupby("month").sum()
+        result = month_sums.reindex(ymd.index, level=1)
+        expected = ymd.groupby(level="month").transform("sum")
+
+        tm.assert_frame_equal(result, expected)
+
+        # Series
+        result = month_sums["A"].reindex(ymd.index, level=1)
+        expected = ymd["A"].groupby(level="month").transform("sum")
+        tm.assert_series_equal(result, expected, check_names=False)
+
+        # axis=1
+        msg = "DataFrame.groupby with axis=1 is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            gb = ymd.T.groupby("month", axis=1)
+
+        month_sums = gb.sum()
+        result = month_sums.reindex(columns=ymd.index, level=1)
+        expected = ymd.groupby(level="month").transform("sum").T
+        tm.assert_frame_equal(result, expected)
+
+    def test_reindex(self, multiindex_dataframe_random_data):
+        frame =
multiindex_dataframe_random_data + + expected = frame.iloc[[0, 3]] + reindexed = frame.loc[[("foo", "one"), ("bar", "one")]] + tm.assert_frame_equal(reindexed, expected) + + def test_reindex_preserve_levels( + self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write + ): + ymd = multiindex_year_month_day_dataframe_random_data + + new_index = ymd.index[::10] + chunk = ymd.reindex(new_index) + if using_copy_on_write: + assert chunk.index.is_(new_index) + else: + assert chunk.index is new_index + + chunk = ymd.loc[new_index] + assert chunk.index.equals(new_index) + + ymdT = ymd.T + chunk = ymdT.reindex(columns=new_index) + if using_copy_on_write: + assert chunk.columns.is_(new_index) + else: + assert chunk.columns is new_index + + chunk = ymdT.loc[:, new_index] + assert chunk.columns.equals(new_index) + + def test_groupby_transform(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + s = frame["A"] + grouper = s.index.get_level_values(0) + + grouped = s.groupby(grouper, group_keys=False) + + applied = grouped.apply(lambda x: x * 2) + expected = grouped.transform(lambda x: x * 2) + result = applied.reindex(expected.index) + tm.assert_series_equal(result, expected, check_names=False) + + def test_groupby_corner(self): + midx = MultiIndex( + levels=[["foo"], ["bar"], ["baz"]], + codes=[[0], [0], [0]], + names=["one", "two", "three"], + ) + df = DataFrame( + [np.random.default_rng(2).random(4)], + columns=["a", "b", "c", "d"], + index=midx, + ) + # should work + df.groupby(level="three") + + def test_groupby_level_no_obs(self): + # #1697 + midx = MultiIndex.from_tuples( + [ + ("f1", "s1"), + ("f1", "s2"), + ("f2", "s1"), + ("f2", "s2"), + ("f3", "s1"), + ("f3", "s2"), + ] + ) + df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx) + df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])] + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df1.groupby(axis=1, level=0) + result = grouped.sum() + assert (result.columns == ["f2", "f3"]).all() + + def test_setitem_with_expansion_multiindex_columns( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + df = ymd[:5].T + df[2000, 1, 10] = df[2000, 1, 7] + assert isinstance(df.columns, MultiIndex) + assert (df[2000, 1, 10] == df[2000, 1, 7]).all() + + def test_alignment(self): + x = Series( + data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]) + ) + + y = Series( + data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]) + ) + + res = x - y + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + # hit non-monotonic code path + res = x[::-1] - y[::-1] + exp_index = x.index.union(y.index) + exp = x.reindex(exp_index) - y.reindex(exp_index) + tm.assert_series_equal(res, exp) + + def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.groupby(level=[0, 1]).mean() + + k1 = ymd.index.get_level_values(0) + k2 = ymd.index.get_level_values(1) + + expected = ymd.groupby([k1, k2]).mean() + + # TODO groupby with level_values drops names + tm.assert_frame_equal(result, expected, check_names=False) + assert result.index.names == ymd.index.names[:2] + + result2 = ymd.groupby(level=ymd.index.names[:2]).mean() + 
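+        # grouping by level names should give the same result as level numbers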
tm.assert_frame_equal(result, result2) + + def test_multilevel_consolidate(self): + index = MultiIndex.from_tuples( + [("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")] + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=index, columns=index + ) + df["Totals", ""] = df.sum(1) + df = df._consolidate() + + def test_level_with_tuples(self): + index = MultiIndex( + levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar", 0)] + result2 = series.loc[("foo", "bar", 0)] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"): + series[("foo", "bar", 0), 2] + + result = frame.loc[("foo", "bar", 0)] + result2 = frame.xs(("foo", "bar", 0)) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + index = MultiIndex( + levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.default_rng(2).standard_normal(6), index=index) + frame = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + + result = series[("foo", "bar")] + result2 = series.loc[("foo", "bar")] + expected = series[:2] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = frame.loc[("foo", "bar")] + result2 = frame.xs(("foo", "bar")) + expected = frame[:2] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.reindex(["foo", "qux"], level=0) + expected = frame.iloc[[0, 1, 2, 7, 8, 9]] + tm.assert_frame_equal(result, expected) + + result = frame.T.reindex(["foo", "qux"], axis=1, level=0) + tm.assert_frame_equal(result, expected.T) + + result = frame.loc[["foo", "qux"]] + tm.assert_frame_equal(result, expected) + + result = frame["A"].loc[["foo", "qux"]] + tm.assert_series_equal(result, expected["A"]) + + result = frame.T.loc[:, ["foo", "qux"]] + tm.assert_frame_equal(result, expected.T) + + @pytest.mark.parametrize("d", [4, "d"]) + def test_empty_frame_groupby_dtypes_consistency(self, d): + # GH 20888 + group_keys = ["a", "b", "c"] + df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]}) + + g = df[df.a == 2].groupby(group_keys) + result = g.first().index + expected = MultiIndex( + levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"] + ) + + tm.assert_index_equal(result, expected) + + def test_duplicate_groupby_issues(self): + idx_tp = [ + ("600809", "20061231"), + ("600809", "20070331"), + ("600809", "20070630"), + ("600809", "20070331"), + ] + dt = ["demo", "demo", "demo", "demo"] + + idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"]) + s = Series(dt, index=idx) + + result = s.groupby(s.index).first() + assert len(result) == 3 + + def test_subsets_multiindex_dtype(self): + # GH 20757 + data = [["x", 1]] + 
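+        # (the columns below form a 3-level MultiIndex under top level "a")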
columns = [("a", "b", np.nan), ("a", "c", 0.0)] + df = DataFrame(data, columns=MultiIndex.from_tuples(columns)) + expected = df.dtypes.a.b + result = df.a.b.dtypes + tm.assert_series_equal(result, expected) + + def test_datetime_object_multiindex(self): + data_dic = { + (0, datetime.date(2018, 3, 3)): {"A": 1, "B": 10}, + (0, datetime.date(2018, 3, 4)): {"A": 2, "B": 11}, + (1, datetime.date(2018, 3, 3)): {"A": 3, "B": 12}, + (1, datetime.date(2018, 3, 4)): {"A": 4, "B": 13}, + } + result = DataFrame.from_dict(data_dic, orient="index") + data = {"A": [1, 2, 3, 4], "B": [10, 11, 12, 13]} + index = [ + [0, 0, 1, 1], + [ + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + datetime.date(2018, 3, 3), + datetime.date(2018, 3, 4), + ], + ] + expected = DataFrame(data=data, index=index) + + tm.assert_frame_equal(result, expected) + + def test_multiindex_with_na(self): + df = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + df.at[("A", "F"), "col_2"] = 0.0 + + expected = DataFrame( + [ + ["A", np.nan, 1.23, 4.56], + ["A", "G", 1.23, 4.56], + ["A", "D", 9.87, 10.54], + ["A", "F", np.nan, 0.0], + ], + columns=["pivot_0", "pivot_1", "col_1", "col_2"], + ).set_index(["pivot_0", "pivot_1"]) + + tm.assert_frame_equal(df, expected) + + +class TestSorted: + """everything you wanted to test about sorting""" + + def test_sort_non_lexsorted(self): + # degenerate case where we sort but don't + # have a satisfying result :< + # GH 15797 + idx = MultiIndex( + [["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]] + ) + + df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64") + assert df.index.is_monotonic_increasing is False + + sorted = df.sort_index() + assert sorted.index.is_monotonic_increasing is True + + expected = DataFrame( + {"col": [1, 4, 5, 2]}, + index=MultiIndex.from_tuples( + [("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")] + ), + dtype="int64", + ) + result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :] + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..52b5f636b1254ceddf869704a6378f4ea5012b8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py @@ -0,0 +1,100 @@ +import sys +import types + +import pytest + +from pandas.compat._optional import ( + VERSIONS, + import_optional_dependency, +) + +import pandas._testing as tm + + +def test_import_optional(): + match = "Missing .*notapackage.* pip .* conda .* notapackage" + with pytest.raises(ImportError, match=match) as exc_info: + import_optional_dependency("notapackage") + # The original exception should be there as context: + assert isinstance(exc_info.value.__context__, ImportError) + + result = import_optional_dependency("notapackage", errors="ignore") + assert result is None + + +def test_xlrd_version_fallback(): + pytest.importorskip("xlrd") + import_optional_dependency("xlrd") + + +def test_bad_version(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, 
match=match): + import_optional_dependency("fakemodule") + + # Test min_version parameter + result = import_optional_dependency("fakemodule", min_version="0.8") + assert result is module + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule") + assert result is module + + with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"): + import_optional_dependency("fakemodule", min_version="1.1.0") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency( + "fakemodule", errors="warn", min_version="1.1.0" + ) + assert result is None + + result = import_optional_dependency( + "fakemodule", errors="ignore", min_version="1.1.0" + ) + assert result is None + + +def test_submodule(monkeypatch): + # Create a fake module with a submodule + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + sub_name = "submodule" + submodule = types.ModuleType(sub_name) + setattr(module, sub_name, submodule) + sys.modules[f"{name}.{sub_name}"] = submodule + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule.submodule") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule.submodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule.submodule") + assert result is submodule + + +def test_no_version_raises(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + with pytest.raises(ImportError, match="Can't determine .* fakemodule"): + import_optional_dependency(name) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..4e569dc40005d5883870b82f46859d5bc36578f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_register_accessor.py @@ -0,0 +1,103 @@ +from collections.abc import Generator +import contextlib + +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import accessor + + +def test_dirname_mixin() -> None: + # GH37173 + + class X(accessor.DirNamesMixin): + x = 1 + y: int + + def __init__(self) -> None: + self.z = 3 + + result = [attr_name for attr_name in dir(X()) if not attr_name.startswith("_")] + + assert result == ["x", "z"] + + +@contextlib.contextmanager +def ensure_removed(obj, attr) -> Generator[None, None, None]: + """Ensure that an attribute added to 'obj' during the test is + removed when we're done + """ + try: + yield + finally: + try: + delattr(obj, attr) + except AttributeError: + pass + obj._accessors.discard(attr) + + +class MyAccessor: + def __init__(self, obj) -> None: + self.obj = obj + self.item = "item" + + @property + def prop(self): + return self.item + + def method(self): + return self.item + + +@pytest.mark.parametrize( + "obj, registrar", + [ + (pd.Series, pd.api.extensions.register_series_accessor), + (pd.DataFrame, pd.api.extensions.register_dataframe_accessor), + (pd.Index, 
pd.api.extensions.register_index_accessor), + ], +) +def test_register(obj, registrar): + with ensure_removed(obj, "mine"): + before = set(dir(obj)) + registrar("mine")(MyAccessor) + o = obj([]) if obj is not pd.Series else obj([], dtype=object) + assert o.mine.prop == "item" + after = set(dir(obj)) + assert (before ^ after) == {"mine"} + assert "mine" in obj._accessors + + +def test_accessor_works(): + with ensure_removed(pd.Series, "mine"): + pd.api.extensions.register_series_accessor("mine")(MyAccessor) + + s = pd.Series([1, 2]) + assert s.mine.obj is s + + assert s.mine.prop == "item" + assert s.mine.method() == "item" + + +def test_overwrite_warns(): + match = r".*MyAccessor.*fake.*Series.*" + with tm.assert_produces_warning(UserWarning, match=match): + with ensure_removed(pd.Series, "fake"): + setattr(pd.Series, "fake", 123) + pd.api.extensions.register_series_accessor("fake")(MyAccessor) + s = pd.Series([1, 2]) + assert s.fake.prop == "item" + + +def test_raises_attribute_error(): + with ensure_removed(pd.Series, "bad"): + + @pd.api.extensions.register_series_accessor("bad") + class Bad: + def __init__(self, data) -> None: + raise AttributeError("whoops") + + with pytest.raises(AttributeError, match="whoops"): + pd.Series([], dtype=object).bad diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_take.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_take.py new file mode 100644 index 0000000000000000000000000000000000000000..4f34ab34c35f0c2446597001f59526d4c8b0900d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/test_take.py @@ -0,0 +1,307 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs import iNaT + +import pandas._testing as tm +import pandas.core.algorithms as algos + + +@pytest.fixture( + params=[ + (np.int8, np.int16(127), np.int8), + (np.int8, np.int16(128), np.int16), + (np.int32, 1, np.int32), + (np.int32, 2.0, np.float64), + (np.int32, 3.0 + 4.0j, np.complex128), + (np.int32, True, np.object_), + (np.int32, "", np.object_), + (np.float64, 1, np.float64), + (np.float64, 2.0, np.float64), + (np.float64, 3.0 + 4.0j, np.complex128), + (np.float64, True, np.object_), + (np.float64, "", np.object_), + (np.complex128, 1, np.complex128), + (np.complex128, 2.0, np.complex128), + (np.complex128, 3.0 + 4.0j, np.complex128), + (np.complex128, True, np.object_), + (np.complex128, "", np.object_), + (np.bool_, 1, np.object_), + (np.bool_, 2.0, np.object_), + (np.bool_, 3.0 + 4.0j, np.object_), + (np.bool_, True, np.bool_), + (np.bool_, "", np.object_), + ] +) +def dtype_fill_out_dtype(request): + return request.param + + +class TestTake: + def test_1d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + data = np.random.default_rng(2).integers(0, 2, 4).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, fill_value=fill_value) + assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all() + assert result[3] == fill_value + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + + result = algos.take_nd(data, indexer, fill_value=fill_value) + assert (result[[0, 1, 2, 3]] == data[indexer]).all() + assert result.dtype == dtype + + def test_2d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + data = np.random.default_rng(2).integers(0, 2, (5, 3)).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 
1, 2], :] == data[[2, 1, 0], :]).all() + assert (result[3, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all() + assert (result[:, 3] == fill_value).all() + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all() + assert result.dtype == dtype + + def test_3d_fill_nonna(self, dtype_fill_out_dtype): + dtype, fill_value, out_dtype = dtype_fill_out_dtype + + data = np.random.default_rng(2).integers(0, 2, (5, 4, 3)).astype(dtype) + indexer = [2, 1, 0, -1] + + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all() + assert (result[3, :, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all() + assert (result[:, 3, :] == fill_value).all() + assert result.dtype == out_dtype + + result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) + assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all() + assert (result[:, :, 3] == fill_value).all() + assert result.dtype == out_dtype + + indexer = [2, 1, 0, 1] + result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) + assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) + assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all() + assert result.dtype == dtype + + result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) + assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all() + assert result.dtype == dtype + + def test_1d_other_dtypes(self): + arr = np.random.default_rng(2).standard_normal(10).astype(np.float32) + + indexer = [1, 2, 3, -1] + result = algos.take_nd(arr, indexer) + expected = arr.take(indexer) + expected[-1] = np.nan + tm.assert_almost_equal(result, expected) + + def test_2d_other_dtypes(self): + arr = np.random.default_rng(2).standard_normal((10, 5)).astype(np.float32) + + indexer = [1, 2, 3, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + expected = arr.take(indexer, axis=0) + expected[-1] = np.nan + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected[:, -1] = np.nan + tm.assert_almost_equal(result, expected) + + def test_1d_bool(self): + arr = np.array([0, 1, 0], dtype=bool) + + result = algos.take_nd(arr, [0, 2, 2, 1]) + expected = arr.take([0, 2, 2, 1]) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, -1]) + assert result.dtype == np.object_ + + def test_2d_bool(self): + arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool) + + result = algos.take_nd(arr, [0, 2, 2, 1]) + expected = arr.take([0, 2, 2, 1], axis=0) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, 2, 1], axis=1) + expected = arr.take([0, 2, 2, 1], axis=1) + tm.assert_numpy_array_equal(result, expected) + + result = algos.take_nd(arr, [0, 2, -1]) + assert 
result.dtype == np.object_ + + def test_2d_float32(self): + arr = np.random.default_rng(2).standard_normal((4, 3)).astype(np.float32) + indexer = [0, 2, -1, 1, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + + expected = arr.take(indexer, axis=0) + expected[[2, 4], :] = np.nan + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected[:, [2, 4]] = np.nan + tm.assert_almost_equal(result, expected) + + def test_2d_datetime64(self): + # 2005/01/01 - 2006/01/01 + arr = ( + np.random.default_rng(2).integers(11_045_376, 11_360_736, (5, 3)) + * 100_000_000_000 + ) + arr = arr.view(dtype="datetime64[ns]") + indexer = [0, 2, -1, 1, -1] + + # axis=0 + result = algos.take_nd(arr, indexer, axis=0) + expected = arr.take(indexer, axis=0) + expected.view(np.int64)[[2, 4], :] = iNaT + tm.assert_almost_equal(result, expected) + + result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1)) + expected = arr.take(indexer, axis=0) + expected[[2, 4], :] = datetime(2007, 1, 1) + tm.assert_almost_equal(result, expected) + + # axis=1 + result = algos.take_nd(arr, indexer, axis=1) + expected = arr.take(indexer, axis=1) + expected.view(np.int64)[:, [2, 4]] = iNaT + tm.assert_almost_equal(result, expected) + + result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1)) + expected = arr.take(indexer, axis=1) + expected[:, [2, 4]] = datetime(2007, 1, 1) + tm.assert_almost_equal(result, expected) + + def test_take_axis_0(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1]) + expected = np.array([[0, 1, 2], [9, 10, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0) + expected = np.array([[0, 1, 2], [0, 0, 0]]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_axis_1(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1], axis=1) + expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0) + expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]]) + tm.assert_numpy_array_equal(result, expected) + + # GH#26976 make sure we validate along the correct axis + with pytest.raises(IndexError, match="indices are out-of-bounds"): + algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0) + + def test_take_non_hashable_fill_value(self): + arr = np.array([1, 2, 3]) + indexer = np.array([1, -1]) + with pytest.raises(ValueError, match="fill_value must be a scalar"): + algos.take(arr, indexer, allow_fill=True, fill_value=[1]) + + # with object dtype it is allowed + arr = np.array([1, 2, 3], dtype=object) + result = algos.take(arr, indexer, allow_fill=True, fill_value=[1]) + expected = np.array([2, [1]], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestExtensionTake: + # The take method found in pd.api.extensions + + def test_bounds_check_large(self): + arr = np.array([1, 2]) + + msg = "indices are out-of-bounds" + with pytest.raises(IndexError, match=msg): + algos.take(arr, [2, 3], allow_fill=True) + + msg = "index 2 is out of bounds for( axis 0 with)? 
size 2" + with pytest.raises(IndexError, match=msg): + algos.take(arr, [2, 3], allow_fill=False) + + def test_bounds_check_small(self): + arr = np.array([1, 2, 3], dtype=np.int64) + indexer = [0, -1, -2] + + msg = r"'indices' contains values less than allowed \(-2 < -1\)" + with pytest.raises(ValueError, match=msg): + algos.take(arr, indexer, allow_fill=True) + + result = algos.take(arr, indexer) + expected = np.array([1, 3, 2], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("allow_fill", [True, False]) + def test_take_empty(self, allow_fill): + arr = np.array([], dtype=np.int64) + # empty take is ok + result = algos.take(arr, [], allow_fill=allow_fill) + tm.assert_numpy_array_equal(arr, result) + + msg = "|".join( + [ + "cannot do a non-empty take from an empty axes.", + "indices are out-of-bounds", + ] + ) + with pytest.raises(IndexError, match=msg): + algos.take(arr, [0], allow_fill=allow_fill) + + def test_take_na_empty(self): + result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0) + expected = np.array([0.0, 0.0]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_coerces_list(self): + arr = [1, 2, 3] + msg = "take accepting non-standard inputs is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = algos.take(arr, [0, 0]) + expected = np.array([1, 1]) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9243c3df7998b1149a5efe62c1c1b02c1dab96e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/frequencies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/frequencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e806c134e4998480e4d22ad53c6d3552c8d6bfb3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/frequencies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/offsets.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/offsets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dabe304f24bb848a8c86685046ee07e2948abca Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tseries/__pycache__/offsets.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82b3aa56c653cd1241872c67e9d9016df04a6c5a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/__init__.py @@ -0,0 +1,29 @@ +def __getattr__(key: str): + # These imports need to be lazy to avoid circular import errors + if key == "hash_array": + from pandas.core.util.hashing import hash_array + + return hash_array + if key == "hash_pandas_object": + from pandas.core.util.hashing import hash_pandas_object + + return hash_pandas_object + if key == "Appender": + from pandas.util._decorators import Appender + + return Appender + if key == "Substitution": + from 
pandas.util._decorators import Substitution + + return Substitution + + if key == "cache_readonly": + from pandas.util._decorators import cache_readonly + + return cache_readonly + + raise AttributeError(f"module 'pandas.util' has no attribute '{key}'") + + +def capitalize_first_letter(s): + return s[:1].upper() + s[1:] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8903d7191986aab4cc4de8af08425eff98a306d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad78623894c995d6275e92a3dd20c7895de68362 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2821dfc228d2aed7db0ccfd5c7dae97de9b758bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23449f93243889761563c1123092e62c5c912834 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c867037cf7658388f9b83073a369abd4d065e485 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_decorators.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8189e72c427d75da0367532edfe04d0a7581e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_decorators.py @@ -0,0 +1,508 @@ +from __future__ import annotations + +from functools import wraps +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +from pandas._libs.properties import cache_readonly +from pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import Mapping + + +def deprecate( + name: str, + alternative: Callable[..., Any], + version: str, + alt_name: str | None = None, + klass: type[Warning] | None = None, + stacklevel: int = 2, + msg: str | None = None, +) -> Callable[[F], F]: + """ + Return a new function that emits 
a deprecation warning on use. + + To use this method for a deprecated function, another function + `alternative` with the same signature must exist. The deprecated + function will emit a deprecation warning, and in the docstring + it will contain the deprecation directive with the provided version + so it can be detected for future removal. + + Parameters + ---------- + name : str + Name of function to deprecate. + alternative : func + Function to use instead. + version : str + Version of pandas in which the method has been deprecated. + alt_name : str, optional + Name to use in preference of alternative.__name__. + klass : Warning, default FutureWarning + stacklevel : int, default 2 + msg : str + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' + """ + alt_name = alt_name or alternative.__name__ + klass = klass or FutureWarning + warning_msg = msg or f"{name} is deprecated, use {alt_name} instead." + + @wraps(alternative) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + warnings.warn(warning_msg, klass, stacklevel=stacklevel) + return alternative(*args, **kwargs) + + # adding deprecated directive to the docstring + msg = msg or f"Use `{alt_name}` instead." + doc_error_msg = ( + "deprecate needs a correctly formatted docstring in " + "the target function (should have a one liner short " + "summary, and opening quotes should be in their own " + f"line). Found:\n{alternative.__doc__}" + ) + + # when python is running in optimized mode (i.e. `-OO`), docstrings are + # removed, so we check that a docstring with correct formatting is used + # but we allow empty docstrings + if alternative.__doc__: + if alternative.__doc__.count("\n") < 3: + raise AssertionError(doc_error_msg) + empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3) + if empty1 or empty2 and not summary: + raise AssertionError(doc_error_msg) + wrapper.__doc__ = dedent( + f""" + {summary.strip()} + + .. deprecated:: {version} + {msg} + + {dedent(doc_string)}""" + ) + # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)], + # Callable[...,Any]]", expected "Callable[[F], F]") + return wrapper # type: ignore[return-value] + + +def deprecate_kwarg( + old_arg_name: str, + new_arg_name: str | None, + mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None, + stacklevel: int = 2, +) -> Callable[[F], F]: + """ + Decorator to deprecate a keyword argument of a function. + + Parameters + ---------- + old_arg_name : str + Name of argument in function to deprecate + new_arg_name : str or None + Name of preferred argument in function. Use None to raise warning that + ``old_arg_name`` keyword is deprecated. + mapping : dict or callable + If mapping is present, use it to translate old arguments to + new arguments. A callable must do its own value checking; + values not found in a dict will be forwarded unchanged. + + Examples + -------- + The following deprecates 'cols', using 'columns' instead + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') + ... def f(columns=''): + ... print(columns) + ... 
+ >>> f(columns='should work ok') + should work ok + + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: cols is deprecated, use columns instead + warnings.warn(msg, FutureWarning) + should raise warning + + >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP + TypeError: Can only specify 'cols' or 'columns', not both + + >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) + ... def f(new=False): + ... print('yes!' if new else 'no!') + ... + >>> f(old='yes') # doctest: +SKIP + FutureWarning: old='yes' is deprecated, use new=True instead + warnings.warn(msg, FutureWarning) + yes! + + To raise a warning that a keyword will be removed entirely in the future + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) + ... def f(cols='', another_param=''): + ... print(cols) + ... + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + >>> f(another_param='should not raise warning') # doctest: +SKIP + should not raise warning + + >>> f(cols='should raise warning', another_param='') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + """ + if mapping is not None and not hasattr(mapping, "get") and not callable(mapping): + raise TypeError( + "mapping from old to new argument values must be dict or callable!" + ) + + def _deprecate_kwarg(func: F) -> F: + @wraps(func) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + old_arg_value = kwargs.pop(old_arg_name, None) + + if old_arg_value is not None: + if new_arg_name is None: + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated and " + "will be removed in a future version. Please take " + f"steps to stop the use of {repr(old_arg_name)}" + ) + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + kwargs[old_arg_name] = old_arg_value + return func(*args, **kwargs) + + elif mapping is not None: + if callable(mapping): + new_arg_value = mapping(old_arg_value) + else: + new_arg_value = mapping.get(old_arg_value, old_arg_value) + msg = ( + f"the {old_arg_name}={repr(old_arg_value)} keyword is " + "deprecated, use " + f"{new_arg_name}={repr(new_arg_value)} instead." + ) + else: + new_arg_value = old_arg_value + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated, " + f"use {repr(new_arg_name)} instead." + ) + + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + if kwargs.get(new_arg_name) is not None: + msg = ( + f"Can only specify {repr(old_arg_name)} " + f"or {repr(new_arg_name)}, not both." + ) + raise TypeError(msg) + kwargs[new_arg_name] = new_arg_value + return func(*args, **kwargs) + + return cast(F, wrapper) + + return _deprecate_kwarg + + +def _format_argument_list(allow_args: list[str]) -> str: + """ + Convert the allow_args argument (either string or integer) of + `deprecate_nonkeyword_arguments` function to a string describing + it to be inserted into warning message. + + Parameters + ---------- + allowed_args : list, tuple or int + The `allowed_args` argument for `deprecate_nonkeyword_arguments`, + but None value is not allowed. + + Returns + ------- + str + The substring describing the argument list in best way to be + inserted to the warning message. 
+ + Examples + -------- + `format_argument_list([])` -> '' + `format_argument_list(['a'])` -> "except for the arguments 'a'" + `format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'" + `format_argument_list(['a', 'b', 'c'])` -> + "except for the arguments 'a', 'b' and 'c'" + """ + if "self" in allow_args: + allow_args.remove("self") + if not allow_args: + return "" + elif len(allow_args) == 1: + return f" except for the argument '{allow_args[0]}'" + else: + last = allow_args[-1] + args = ", ".join(["'" + x + "'" for x in allow_args[:-1]]) + return f" except for the arguments {args} and '{last}'" + + +def future_version_msg(version: str | None) -> str: + """Specify which version of pandas the deprecation will take place in.""" + if version is None: + return "In a future version of pandas" + else: + return f"Starting with pandas version {version}" + + +def deprecate_nonkeyword_arguments( + version: str | None, + allowed_args: list[str] | None = None, + name: str | None = None, +) -> Callable[[F], F]: + """ + Decorator to deprecate a use of non-keyword arguments of a function. + + Parameters + ---------- + version : str, optional + The version in which positional arguments will become + keyword-only. If None, then the warning message won't + specify any particular version. + + allowed_args : list, optional + In case of list, it must be the list of names of some + first arguments of the decorated functions that are + OK to be given as positional arguments. In case of None value, + defaults to list of all arguments not having the + default value. + + name : str, optional + The specific name of the function to show in the warning + message. If None, then the Qualified name of the function + is used. + """ + + def decorate(func): + old_sig = inspect.signature(func) + + if allowed_args is not None: + allow_args = allowed_args + else: + allow_args = [ + p.name + for p in old_sig.parameters.values() + if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.default is p.empty + ] + + new_params = [ + p.replace(kind=p.KEYWORD_ONLY) + if ( + p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.name not in allow_args + ) + else p + for p in old_sig.parameters.values() + ] + new_params.sort(key=lambda p: p.kind) + new_sig = old_sig.replace(parameters=new_params) + + num_allow_args = len(allow_args) + msg = ( + f"{future_version_msg(version)} all arguments of " + f"{name or func.__qualname__}{{arguments}} will be keyword-only." + ) + + @wraps(func) + def wrapper(*args, **kwargs): + if len(args) > num_allow_args: + warnings.warn( + msg.format(arguments=_format_argument_list(allow_args)), + FutureWarning, + stacklevel=find_stack_level(), + ) + return func(*args, **kwargs) + + # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no + # attribute "__signature__" + wrapper.__signature__ = new_sig # type: ignore[attr-defined] + return wrapper + + return decorate + + +def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]: + """ + A decorator to take docstring templates, concatenate them and perform string + substitution on them. + + This decorator will add a variable "_docstring_components" to the wrapped + callable to keep track the original docstring template for potential usage. + If it should be consider as a template, it will be saved as a string. + Otherwise, it will be saved as callable, and later user __doc__ and dedent + to get docstring. 
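+ 
+ For illustration, a minimal sketch (the ``klass`` template key and the
+ ``describe`` function here are made up, not part of the pandas API):
+ 
+ >>> @doc(klass="Series")  # doctest: +SKIP
+ ... def describe(self):
+ ...     "Summarize this {klass}."
+ >>> describe.__doc__  # doctest: +SKIP
+ 'Summarize this Series.'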
+ + Parameters + ---------- + *docstrings : None, str, or callable + The string / docstring / docstring template to be appended in order + after default docstring under callable. + **params + The string which would be used to format docstring template. + """ + + def decorator(decorated: F) -> F: + # collecting docstring and docstring templates + docstring_components: list[str | Callable] = [] + if decorated.__doc__: + docstring_components.append(dedent(decorated.__doc__)) + + for docstring in docstrings: + if docstring is None: + continue + if hasattr(docstring, "_docstring_components"): + docstring_components.extend( + docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] + ) + elif isinstance(docstring, str) or docstring.__doc__: + docstring_components.append(docstring) + + params_applied = [ + component.format(**params) + if isinstance(component, str) and len(params) > 0 + else component + for component in docstring_components + ] + + decorated.__doc__ = "".join( + [ + component + if isinstance(component, str) + else dedent(component.__doc__ or "") + for component in params_applied + ] + ) + + # error: "F" has no attribute "_docstring_components" + decorated._docstring_components = ( # type: ignore[attr-defined] + docstring_components + ) + return decorated + + return decorator + + +# Substitution and Appender are derived from matplotlib.docstring (1.1.0) +# module https://matplotlib.org/users/license.html + + +class Substitution: + """ + A decorator to take a function's docstring and perform string + substitution on it. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter) + + Usage: construct a docstring.Substitution with a sequence or + dictionary suitable for performing substitution; then + decorate a suitable function with the constructed object. e.g. + + sub_author_name = Substitution(author='Jason') + + @sub_author_name + def some_function(x): + "%(author)s wrote this function" + + # note that some_function.__doc__ is now "Jason wrote this function" + + One can also use positional arguments. + + sub_first_last_names = Substitution('Edgar Allen', 'Poe') + + @sub_first_last_names + def some_function(x): + "%s %s wrote the Raven" + """ + + def __init__(self, *args, **kwargs) -> None: + if args and kwargs: + raise AssertionError("Only positional or keyword args are allowed") + + self.params = args or kwargs + + def __call__(self, func: F) -> F: + func.__doc__ = func.__doc__ and func.__doc__ % self.params + return func + + def update(self, *args, **kwargs) -> None: + """ + Update self.params with supplied args. + """ + if isinstance(self.params, dict): + self.params.update(*args, **kwargs) + + +class Appender: + """ + A function decorator that will append an addendum to the docstring + of the target function. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter). + + Usage: construct a docstring.Appender with a string to be joined to + the original docstring. An optional 'join' parameter may be supplied + which will be used to join the docstring and addendum. e.g. 
+ + add_copyright = Appender("Copyright (c) 2009", join='\n') + + @add_copyright + def my_dog(has='fleas'): + "This docstring will have a copyright below" + pass + """ + + addendum: str | None + + def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> None: + if indents > 0: + self.addendum = indent(addendum, indents=indents) + else: + self.addendum = addendum + self.join = join + + def __call__(self, func: T) -> T: + func.__doc__ = func.__doc__ if func.__doc__ else "" + self.addendum = self.addendum if self.addendum else "" + docitems = [func.__doc__, self.addendum] + func.__doc__ = dedent(self.join.join(docitems)) + return func + + +def indent(text: str | None, indents: int = 1) -> str: + if not text or not isinstance(text, str): + return "" + jointext = "".join(["\n"] + [" "] * indents) + return jointext.join(text.split("\n")) + + +__all__ = [ + "Appender", + "cache_readonly", + "deprecate", + "deprecate_kwarg", + "deprecate_nonkeyword_arguments", + "doc", + "future_version_msg", + "Substitution", +] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_doctools.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_doctools.py new file mode 100644 index 0000000000000000000000000000000000000000..12619abf4baaf336dfd3d5ae78a9bc2133f310c0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_doctools.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +import pandas as pd + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class TablePlotter: + """ + Layout some DataFrames in vertical/horizontal layout for explanation. + Used in merging.rst + """ + + def __init__( + self, + cell_width: float = 0.37, + cell_height: float = 0.25, + font_size: float = 7.5, + ) -> None: + self.cell_width = cell_width + self.cell_height = cell_height + self.font_size = font_size + + def _shape(self, df: pd.DataFrame) -> tuple[int, int]: + """ + Calculate table shape considering index levels. + """ + row, col = df.shape + return row + df.columns.nlevels, col + df.index.nlevels + + def _get_cells(self, left, right, vertical) -> tuple[int, int]: + """ + Calculate appropriate figure size based on left and right data. + """ + if vertical: + # calculate required number of cells + vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0]) + hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1] + else: + vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) + return hcells, vcells + + def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True): + """ + Plot left / right DataFrames in specified layout. + + Parameters + ---------- + left : list of DataFrames before operation is applied + right : DataFrame of operation result + labels : list of str to be drawn as titles of left DataFrames + vertical : bool, default True + If True, use vertical layout. If False, use horizontal layout. 
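+ 
+ Examples
+ --------
+ A minimal sketch, mirroring the module-level ``main()`` demo below;
+ the frames are illustrative and matplotlib must be installed:
+ 
+ >>> p = TablePlotter()  # doctest: +SKIP
+ >>> df1 = pd.DataFrame({"A": [10, 11], "B": [20, 21]})  # doctest: +SKIP
+ >>> df2 = pd.DataFrame({"A": [12], "B": [22]})  # doctest: +SKIP
+ >>> fig = p.plot([df1, df2], pd.concat([df1, df2]),
+ ...              labels=["df1", "df2"], vertical=True)  # doctest: +SKIP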
+ """ + from matplotlib import gridspec + import matplotlib.pyplot as plt + + if not isinstance(left, list): + left = [left] + left = [self._conv(df) for df in left] + right = self._conv(right) + + hcells, vcells = self._get_cells(left, right, vertical) + + if vertical: + figsize = self.cell_width * hcells, self.cell_height * vcells + else: + # include margin for titles + figsize = self.cell_width * hcells, self.cell_height * vcells + fig = plt.figure(figsize=figsize) + + if vertical: + gs = gridspec.GridSpec(len(left), hcells) + # left + max_left_cols = max(self._shape(df)[1] for df in left) + max_left_rows = max(self._shape(df)[0] for df in left) + for i, (_left, _label) in enumerate(zip(left, labels)): + ax = fig.add_subplot(gs[i, 0:max_left_cols]) + self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows) + # right + ax = plt.subplot(gs[:, max_left_cols:]) + self._make_table(ax, right, title="Result", height=1.05 / vcells) + fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) + else: + max_rows = max(self._shape(df)[0] for df in left + [right]) + height = 1.0 / np.max(max_rows) + gs = gridspec.GridSpec(1, hcells) + # left + i = 0 + for df, _label in zip(left, labels): + sp = self._shape(df) + ax = fig.add_subplot(gs[0, i : i + sp[1]]) + self._make_table(ax, df, title=_label, height=height) + i += sp[1] + # right + ax = plt.subplot(gs[0, i:]) + self._make_table(ax, right, title="Result", height=height) + fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) + + return fig + + def _conv(self, data): + """ + Convert each input to appropriate for table outplot. + """ + if isinstance(data, pd.Series): + if data.name is None: + data = data.to_frame(name="") + else: + data = data.to_frame() + data = data.fillna("NaN") + return data + + def _insert_index(self, data): + # insert is destructive + data = data.copy() + idx_nlevels = data.index.nlevels + if idx_nlevels == 1: + data.insert(0, "Index", data.index) + else: + for i in range(idx_nlevels): + data.insert(i, f"Index{i}", data.index._get_level_values(i)) + + col_nlevels = data.columns.nlevels + if col_nlevels > 1: + col = data.columns._get_level_values(0) + values = [ + data.columns._get_level_values(i)._values for i in range(1, col_nlevels) + ] + col_df = pd.DataFrame(values) + data.columns = col_df.columns + data = pd.concat([col_df, data]) + data.columns = col + return data + + def _make_table(self, ax, df, title: str, height: float | None = None) -> None: + if df is None: + ax.set_visible(False) + return + + from pandas import plotting + + idx_nlevels = df.index.nlevels + col_nlevels = df.columns.nlevels + # must be convert here to get index levels for colorization + df = self._insert_index(df) + tb = plotting.table(ax, df, loc=9) + tb.set_fontsize(self.font_size) + + if height is None: + height = 1.0 / (len(df) + 1) + + props = tb.properties() + for (r, c), cell in props["celld"].items(): + if c == -1: + cell.set_visible(False) + elif r < col_nlevels and c < idx_nlevels: + cell.set_visible(False) + elif r < col_nlevels or c < idx_nlevels: + cell.set_facecolor("#AAAAAA") + cell.set_height(height) + + ax.set_title(title, size=self.font_size) + ax.axis("off") + + +def main() -> None: + import matplotlib.pyplot as plt + + p = TablePlotter() + + df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]}) + df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]}) + + p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True) + plt.show() + + df3 = pd.DataFrame({"X": [10, 
12], "Z": [30, 32]}) + + p.plot( + [df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df2"], vertical=False + ) + plt.show() + + idx = pd.MultiIndex.from_tuples( + [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")] + ) + column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")]) + df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx) + df3.columns = column + p.plot(df3, df3, labels=["df3"]) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_exceptions.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..5f50838d373154868ff7414775763a1c66853c65 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_exceptions.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import contextlib +import inspect +import os +import re +from typing import TYPE_CHECKING +import warnings + +if TYPE_CHECKING: + from collections.abc import Generator + from types import FrameType + + +@contextlib.contextmanager +def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]: + """ + Rewrite the message of an exception. + """ + try: + yield + except Exception as err: + if not err.args: + raise + msg = str(err.args[0]) + msg = msg.replace(old_name, new_name) + args: tuple[str, ...] = (msg,) + if len(err.args) > 1: + args = args + err.args[1:] + err.args = args + raise + + +def find_stack_level() -> int: + """ + Find the first place in the stack that is not inside pandas + (tests notwithstanding). + """ + + import pandas as pd + + pkg_dir = os.path.dirname(pd.__file__) + test_dir = os.path.join(pkg_dir, "tests") + + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame: FrameType | None = inspect.currentframe() + try: + n = 0 + while frame: + filename = inspect.getfile(frame) + if filename.startswith(pkg_dir) and not filename.startswith(test_dir): + frame = frame.f_back + n += 1 + else: + break + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + return n + + +@contextlib.contextmanager +def rewrite_warning( + target_message: str, + target_category: type[Warning], + new_message: str, + new_category: type[Warning] | None = None, +) -> Generator[None, None, None]: + """ + Rewrite the message of a warning. + + Parameters + ---------- + target_message : str + Warning message to match. + target_category : Warning + Warning type to match. + new_message : str + New warning message to emit. + new_category : Warning or None, default None + New warning type to emit. When None, will be the same as target_category. 
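+ 
+ Examples
+ --------
+ A minimal sketch of the intended use (the message strings are
+ illustrative only):
+ 
+ >>> import warnings
+ >>> with rewrite_warning("old message", FutureWarning, "new message"):
+ ...     warnings.warn("old message", FutureWarning)  # doctest: +SKIP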
+ """ + if new_category is None: + new_category = target_category + with warnings.catch_warnings(record=True) as record: + yield + if len(record) > 0: + match = re.compile(target_message) + for warning in record: + if warning.category is target_category and re.search( + match, str(warning.message) + ): + category = new_category + message: Warning | str = new_message + else: + category, message = warning.category, warning.message + warnings.warn_explicit( + message=message, + category=category, + filename=warning.filename, + lineno=warning.lineno, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_print_versions.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_print_versions.py new file mode 100644 index 0000000000000000000000000000000000000000..e39c2f7badb1d1b6d513d07f328340b9d796fff2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_print_versions.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import codecs +import json +import locale +import os +import platform +import struct +import sys +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pandas._typing import JSONSerializable + +from pandas.compat._optional import ( + VERSIONS, + get_version, + import_optional_dependency, +) + + +def _get_commit_hash() -> str | None: + """ + Use vendored versioneer code to get git hash, which handles + git worktree correctly. + """ + try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __git_version__, + ) + + return __git_version__ + except ImportError: + from pandas._version import get_versions + + versions = get_versions() + return versions["full-revisionid"] + + +def _get_sys_info() -> dict[str, JSONSerializable]: + """ + Returns system information as a JSON serializable dictionary. + """ + uname_result = platform.uname() + language_code, encoding = locale.getlocale() + return { + "commit": _get_commit_hash(), + "python": ".".join([str(i) for i in sys.version_info]), + "python-bits": struct.calcsize("P") * 8, + "OS": uname_result.system, + "OS-release": uname_result.release, + "Version": uname_result.version, + "machine": uname_result.machine, + "processor": uname_result.processor, + "byteorder": sys.byteorder, + "LC_ALL": os.environ.get("LC_ALL"), + "LANG": os.environ.get("LANG"), + "LOCALE": {"language-code": language_code, "encoding": encoding}, + } + + +def _get_dependency_info() -> dict[str, JSONSerializable]: + """ + Returns dependency information as a JSON serializable dictionary. + """ + deps = [ + "pandas", + # required + "numpy", + "pytz", + "dateutil", + # install / build, + "setuptools", + "pip", + "Cython", + # test + "pytest", + "hypothesis", + # docs + "sphinx", + # Other, need a min version + "blosc", + "feather", + "xlsxwriter", + "lxml.etree", + "html5lib", + "pymysql", + "psycopg2", + "jinja2", + # Other, not imported. + "IPython", + "pandas_datareader", + ] + deps.extend(list(VERSIONS)) + + result: dict[str, JSONSerializable] = {} + for modname in deps: + mod = import_optional_dependency(modname, errors="ignore") + result[modname] = get_version(mod) if mod else None + return result + + +def show_versions(as_json: str | bool = False) -> None: + """ + Provide useful information, important for bug reports. + + It comprises info about hosting operation system, pandas version, + and versions of other installed relative packages. + + Parameters + ---------- + as_json : str or bool, default False + * If False, outputs info in a human readable form to the console. 
+ * If str, it will be considered as a path to a file. + Info will be written to that file in JSON format. + * If True, outputs info in JSON format to the console. + + Examples + -------- + >>> pd.show_versions() # doctest: +SKIP + Your output may look something like this: + INSTALLED VERSIONS + ------------------ + commit : 37ea63d540fd27274cad6585082c91b1283f963d + python : 3.10.6.final.0 + python-bits : 64 + OS : Linux + OS-release : 5.10.102.1-microsoft-standard-WSL2 + Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022 + machine : x86_64 + processor : x86_64 + byteorder : little + LC_ALL : None + LANG : en_GB.UTF-8 + LOCALE : en_GB.UTF-8 + pandas : 2.0.1 + numpy : 1.24.3 + ... + """ + sys_info = _get_sys_info() + deps = _get_dependency_info() + + if as_json: + j = {"system": sys_info, "dependencies": deps} + + if as_json is True: + sys.stdout.writelines(json.dumps(j, indent=2)) + else: + assert isinstance(as_json, str) # needed for mypy + with codecs.open(as_json, "wb", encoding="utf8") as f: + json.dump(j, f, indent=2) + + else: + assert isinstance(sys_info["LOCALE"], dict) # needed for mypy + language_code = sys_info["LOCALE"]["language-code"] + encoding = sys_info["LOCALE"]["encoding"] + sys_info["LOCALE"] = f"{language_code}.{encoding}" + + maxlen = max(len(x) for x in deps) + print("\nINSTALLED VERSIONS") + print("------------------") + for k, v in sys_info.items(): + print(f"{k:<{maxlen}}: {v}") + print("") + for k, v in deps.items(): + print(f"{k:<{maxlen}}: {v}") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_test_decorators.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_test_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1912bce856dd2694447d820ea2c5124be9c1a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_test_decorators.py @@ -0,0 +1,173 @@ +""" +This module provides decorator functions which can be applied to test objects +in order to skip those objects when certain conditions occur. A sample use case +is to detect if the platform is missing ``matplotlib``. If so, any test objects +which require ``matplotlib`` and decorated with ``@td.skip_if_no("matplotlib")`` +will be skipped by ``pytest`` during the execution of the test suite. + +To illustrate, after importing this module: + +import pandas.util._test_decorators as td + +The decorators can be applied to classes: + +@td.skip_if_no("package") +class Foo: + ... + +Or individual functions: + +@td.skip_if_no("package") +def test_foo(): + ... + +For more information, refer to the ``pytest`` documentation on ``skipif``. +""" +from __future__ import annotations + +import locale +from typing import ( + TYPE_CHECKING, + Callable, +) + +import pytest + +from pandas._config import get_option + +if TYPE_CHECKING: + from pandas._typing import F + +from pandas._config.config import _get_option + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat._optional import import_optional_dependency + + +def skip_if_installed(package: str) -> pytest.MarkDecorator: + """ + Skip a test if a package is installed. + + Parameters + ---------- + package : str + The name of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. 
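+ 
+ Examples
+ --------
+ For illustration (the package name and test are arbitrary):
+ 
+ >>> @skip_if_installed("flaky")  # doctest: +SKIP
+ ... def test_assumes_package_absent():
+ ...     ...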
+ """ + return pytest.mark.skipif( + bool(import_optional_dependency(package, errors="ignore")), + reason=f"Skipping because {package} is installed.", + ) + + +def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator: + """ + Generic function to help skip tests when required packages are not + present on the testing system. + + This function returns a pytest mark with a skip condition that will be + evaluated during test collection. An attempt will be made to import the + specified ``package`` and optionally ensure it meets the ``min_version`` + + The mark can be used as either a decorator for a test class or to be + applied to parameters in pytest.mark.parametrize calls or parametrized + fixtures. Use pytest.importorskip if an imported moduled is later needed + or for test functions. + + If the import and version check are unsuccessful, then the test function + (or test case when used in conjunction with parametrization) will be + skipped. + + Parameters + ---------- + package: str + The name of the required package. + min_version: str or None, default None + Optional minimum version of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. + """ + msg = f"Could not import '{package}'" + if min_version: + msg += f" satisfying a min_version of {min_version}" + return pytest.mark.skipif( + not bool( + import_optional_dependency( + package, errors="ignore", min_version=min_version + ) + ), + reason=msg, + ) + + +skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit") +skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") +skip_if_not_us_locale = pytest.mark.skipif( + locale.getlocale()[0] != "en_US", + reason=f"Set local {locale.getlocale()[0]} is not en_US", +) + + +def parametrize_fixture_doc(*args) -> Callable[[F], F]: + """ + Intended for use as a decorator for parametrized fixture, + this function will wrap the decorated function with a pytest + ``parametrize_fixture_doc`` mark. That mark will format + initial fixture docstring by replacing placeholders {0}, {1} etc + with parameters passed as arguments. + + Parameters + ---------- + args: iterable + Positional arguments for docstring. 
+ + Returns + ------- + function + The decorated function wrapped within a pytest + ``parametrize_fixture_doc`` mark + """ + + def documented_fixture(fixture): + fixture.__doc__ = fixture.__doc__.format(*args) + return fixture + + return documented_fixture + + +def mark_array_manager_not_yet_implemented(request) -> None: + mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager") + request.applymarker(mark) + + +skip_array_manager_not_yet_implemented = pytest.mark.xfail( + _get_option("mode.data_manager", silent=True) == "array", + reason="Not yet implemented for ArrayManager", +) + +skip_array_manager_invalid_test = pytest.mark.skipif( + _get_option("mode.data_manager", silent=True) == "array", + reason="Test that relies on BlockManager internals or specific behaviour", +) + +skip_copy_on_write_not_yet_implemented = pytest.mark.xfail( + get_option("mode.copy_on_write") is True, + reason="Not yet implemented/adapted for Copy-on-Write mode", +) + +skip_copy_on_write_invalid_test = pytest.mark.skipif( + get_option("mode.copy_on_write") is True, + reason="Test not valid for Copy-on-Write mode", +) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_tester.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_tester.py new file mode 100644 index 0000000000000000000000000000000000000000..7cfddef7ddff87275ebf31eb7ec10e65d26f8668 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_tester.py @@ -0,0 +1,53 @@ +""" +Entrypoint for testing from the top-level namespace. +""" +from __future__ import annotations + +import os +import sys + +from pandas.compat._optional import import_optional_dependency + +PKG = os.path.dirname(os.path.dirname(__file__)) + + +def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None: + """ + Run the pandas test suite using pytest. + + By default, runs with the marks -m "not slow and not network and not db" + + Parameters + ---------- + extra_args : list[str], default None + Extra marks to run the tests. + run_doctests : bool, default False + Whether to only run the Python and Cython doctests. If you would like to run + both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython" + to extra_args. + + Examples + -------- + >>> pd.test() # doctest: +SKIP + running: pytest... 
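+ 
+ Extra pytest arguments can be passed as a list; the echoed command
+ depends on the local install path (elided here):
+ 
+ >>> pd.test(extra_args=["-m not slow"])  # doctest: +SKIP
+ running: pytest -m not slow ...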
+ """ + pytest = import_optional_dependency("pytest") + import_optional_dependency("hypothesis") + cmd = ["-m not slow and not network and not db"] + if extra_args: + if not isinstance(extra_args, list): + extra_args = [extra_args] + cmd = extra_args + if run_doctests: + cmd = [ + "--doctest-modules", + "--doctest-cython", + f"--ignore={os.path.join(PKG, 'tests')}", + ] + cmd += [PKG] + joined = " ".join(cmd) + print(f"running: pytest {joined}") + sys.exit(pytest.main(cmd)) + + +__all__ = ["test"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/_validators.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0b4d549f49ea972d50c97986c60be64c021c3c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/_validators.py @@ -0,0 +1,456 @@ +""" +Module that contains many useful utilities +for validating data or function arguments +""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Sequence, +) +from typing import ( + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_bool, + is_integer, +) + +BoolishT = TypeVar("BoolishT", bool, int) +BoolishNoneT = TypeVar("BoolishNoneT", bool, int, None) + + +def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether 'args' has length of at most 'compat_args'. Raises + a TypeError if that is not the case, similar to in Python when a + function is called with too many arguments. + """ + if max_fname_arg_count < 0: + raise ValueError("'max_fname_arg_count' must be non-negative") + + if len(args) > len(compat_args): + max_arg_count = len(compat_args) + max_fname_arg_count + actual_arg_count = len(args) + max_fname_arg_count + argument = "argument" if max_arg_count == 1 else "arguments" + + raise TypeError( + f"{fname}() takes at most {max_arg_count} {argument} " + f"({actual_arg_count} given)" + ) + + +def _check_for_default_values(fname, arg_val_dict, compat_args) -> None: + """ + Check that the keys in `arg_val_dict` are mapped to their + default values as specified in `compat_args`. + + Note that this function is to be called only when it has been + checked that arg_val_dict.keys() is a subset of compat_args + """ + for key in arg_val_dict: + # try checking equality directly with '=' operator, + # as comparison may have been overridden for the left + # hand object + try: + v1 = arg_val_dict[key] + v2 = compat_args[key] + + # check for None-ness otherwise we could end up + # comparing a numpy array vs None + if (v1 is not None and v2 is None) or (v1 is None and v2 is not None): + match = False + else: + match = v1 == v2 + + if not is_bool(match): + raise ValueError("'match' is not a boolean") + + # could not compare them directly, so try comparison + # using the 'is' operator + except ValueError: + match = arg_val_dict[key] is compat_args[key] + + if not match: + raise ValueError( + f"the '{key}' parameter is not supported in " + f"the pandas implementation of {fname}()" + ) + + +def validate_args(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether the length of the `*args` argument passed into a function + has at most `len(compat_args)` arguments and whether or not all of these + elements in `args` are set to their default values. 
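+ 
+ As a rough sketch (``compat_args`` below is a made-up numpy-compat
+ signature, not a real pandas mapping):
+ 
+ >>> compat_args = {"axis": None, "out": None}
+ >>> validate_args("sum", (None,), 1, compat_args)  # defaults only: OK
+ >>> validate_args("sum", (0,), 1, compat_args)  # doctest: +SKIP
+ Traceback (most recent call last):
+     ...
+ ValueError: the 'axis' parameter is not supported in the pandas implementation of sum()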
+ + Parameters + ---------- + fname : str + The name of the function being passed the `*args` parameter + args : tuple + The `*args` parameter passed into a function + max_fname_arg_count : int + The maximum number of arguments that the function `fname` + can accept, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args : dict + A dictionary of keys and their associated default values. + In order to accommodate buggy behaviour in some versions of `numpy`, + where a signature displayed keyword arguments but then passed those + arguments **positionally** internally when calling downstream + implementations, a dict ensures that the original + order of the keyword arguments is enforced. + + Raises + ------ + TypeError + If `args` contains more values than there are `compat_args` + ValueError + If `args` contains values that do not correspond to those + of the default values specified in `compat_args` + """ + _check_arg_length(fname, args, max_fname_arg_count, compat_args) + + # We do this so that we can provide a more informative + # error message about the parameters that we are not + # supporting in the pandas implementation of 'fname' + kwargs = dict(zip(compat_args, args)) + _check_for_default_values(fname, kwargs, compat_args) + + +def _check_for_invalid_keys(fname, kwargs, compat_args) -> None: + """ + Checks whether 'kwargs' contains any keys that are not + in 'compat_args' and raises a TypeError if there is one. + """ + # set(dict) --> set of the dictionary's keys + diff = set(kwargs) - set(compat_args) + + if diff: + bad_arg = next(iter(diff)) + raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'") + + +def validate_kwargs(fname, kwargs, compat_args) -> None: + """ + Checks whether parameters passed to the **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. + + Parameters + ---------- + fname : str + The name of the function being passed the `**kwargs` parameter + kwargs : dict + The `**kwargs` parameter passed into `fname` + compat_args: dict + A dictionary of keys that `kwargs` is allowed to have and their + associated default values + + Raises + ------ + TypeError if `kwargs` contains keys not in `compat_args` + ValueError if `kwargs` contains keys in `compat_args` that do not + map to the default values specified in `compat_args` + """ + kwds = kwargs.copy() + _check_for_invalid_keys(fname, kwargs, compat_args) + _check_for_default_values(fname, kwds, compat_args) + + +def validate_args_and_kwargs( + fname, args, kwargs, max_fname_arg_count, compat_args +) -> None: + """ + Checks whether parameters passed to the *args and **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. + + Parameters + ---------- + fname: str + The name of the function being passed the `**kwargs` parameter + args: tuple + The `*args` parameter passed into a function + kwargs: dict + The `**kwargs` parameter passed into `fname` + max_fname_arg_count: int + The minimum number of arguments that the function `fname` + requires, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args: dict + A dictionary of keys that `kwargs` is allowed to + have and their associated default values. 
+ + Raises + ------ + TypeError if `args` contains more values than there are + `compat_args` OR `kwargs` contains keys not in `compat_args` + ValueError if `args` contains values not at the default value (`None`) + `kwargs` contains keys in `compat_args` that do not map to the default + value as specified in `compat_args` + + See Also + -------- + validate_args : Purely args validation. + validate_kwargs : Purely kwargs validation. + + """ + # Check that the total number of arguments passed in (i.e. + # args and kwargs) does not exceed the length of compat_args + _check_arg_length( + fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args + ) + + # Check there is no overlap with the positional and keyword + # arguments, similar to what is done in actual Python functions + args_dict = dict(zip(compat_args, args)) + + for key in args_dict: + if key in kwargs: + raise TypeError( + f"{fname}() got multiple values for keyword argument '{key}'" + ) + + kwargs.update(args_dict) + validate_kwargs(fname, kwargs, compat_args) + + +def validate_bool_kwarg( + value: BoolishNoneT, + arg_name: str, + none_allowed: bool = True, + int_allowed: bool = False, +) -> BoolishNoneT: + """ + Ensure that argument passed in arg_name can be interpreted as boolean. + + Parameters + ---------- + value : bool + Value to be validated. + arg_name : str + Name of the argument. To be reflected in the error message. + none_allowed : bool, default True + Whether to consider None to be a valid boolean. + int_allowed : bool, default False + Whether to consider integer value to be a valid boolean. + + Returns + ------- + value + The same value as input. + + Raises + ------ + ValueError + If the value is not a valid boolean. + """ + good_value = is_bool(value) + if none_allowed: + good_value = good_value or (value is None) + + if int_allowed: + good_value = good_value or isinstance(value, int) + + if not good_value: + raise ValueError( + f'For argument "{arg_name}" expected type bool, received ' + f"type {type(value).__name__}." + ) + return value # pyright: ignore[reportGeneralTypeIssues] + + +def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True): + """ + Validate the keyword arguments to 'fillna'. + + This checks that exactly one of 'value' and 'method' is specified. + If 'method' is specified, this validates that it's a valid method. + + Parameters + ---------- + value, method : object + The 'value' and 'method' keyword arguments for 'fillna'. + validate_scalar_dict_value : bool, default True + Whether to validate that 'value' is a scalar or dict. Specifically, + validate that it is not a list or tuple. + + Returns + ------- + value, method : object + """ + from pandas.core.missing import clean_fill_method + + if value is None and method is None: + raise ValueError("Must specify a fill 'value' or 'method'.") + if value is None and method is not None: + method = clean_fill_method(method) + + elif value is not None and method is None: + if validate_scalar_dict_value and isinstance(value, (list, tuple)): + raise TypeError( + '"value" parameter must be a scalar or dict, but ' + f'you passed a "{type(value).__name__}"' + ) + + elif value is not None and method is not None: + raise ValueError("Cannot specify both 'value' and 'method'.") + + return value, method + + +def validate_percentile(q: float | Iterable[float]) -> np.ndarray: + """ + Validate percentiles (used by describe and quantile). 
+ + This function checks if the given float or iterable of floats is a valid percentile + otherwise raises a ValueError. + + Parameters + ---------- + q: float or iterable of floats + A single percentile or an iterable of percentiles. + + Returns + ------- + ndarray + An ndarray of the percentiles if valid. + + Raises + ------ + ValueError if percentiles are not in given interval([0, 1]). + """ + q_arr = np.asarray(q) + # Don't change this to an f-string. The string formatting + # is too expensive for cases where we don't need it. + msg = "percentiles should all be in the interval [0, 1]" + if q_arr.ndim == 0: + if not 0 <= q_arr <= 1: + raise ValueError(msg) + else: + if not all(0 <= qs <= 1 for qs in q_arr): + raise ValueError(msg) + return q_arr + + +@overload +def validate_ascending(ascending: BoolishT) -> BoolishT: + ... + + +@overload +def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: + ... + + +def validate_ascending( + ascending: bool | int | Sequence[BoolishT], +) -> bool | int | list[BoolishT]: + """Validate ``ascending`` kwargs for ``sort_index`` method.""" + kwargs = {"none_allowed": False, "int_allowed": True} + if not isinstance(ascending, Sequence): + return validate_bool_kwarg(ascending, "ascending", **kwargs) + + return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending] + + +def validate_endpoints(closed: str | None) -> tuple[bool, bool]: + """ + Check that the `closed` argument is among [None, "left", "right"] + + Parameters + ---------- + closed : {None, "left", "right"} + + Returns + ------- + left_closed : bool + right_closed : bool + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_closed = False + right_closed = False + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + return left_closed, right_closed + + +def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: + """ + Check that the `inclusive` argument is among {"both", "neither", "left", "right"}. + + Parameters + ---------- + inclusive : {"both", "neither", "left", "right"} + + Returns + ------- + left_right_inclusive : tuple[bool, bool] + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_right_inclusive: tuple[bool, bool] | None = None + + if isinstance(inclusive, str): + left_right_inclusive = { + "both": (True, True), + "left": (True, False), + "right": (False, True), + "neither": (False, False), + }.get(inclusive) + + if left_right_inclusive is None: + raise ValueError( + "Inclusive has to be either 'both', 'neither', 'left' or 'right'" + ) + + return left_right_inclusive + + +def validate_insert_loc(loc: int, length: int) -> int: + """ + Check that we have an integer between -length and length, inclusive. + + Standardize negative loc to within [0, length]. + + The exceptions we raise on failure match np.insert. 
+    """
+    if not is_integer(loc):
+        raise TypeError(f"loc must be an integer between -{length} and {length}")
+
+    if loc < 0:
+        loc += length
+    if not 0 <= loc <= length:
+        raise IndexError(f"loc must be an integer between -{length} and {length}")
+    return loc  # pyright: ignore[reportGeneralTypeIssues]
+
+
+def check_dtype_backend(dtype_backend) -> None:
+    if dtype_backend is not lib.no_default:
+        if dtype_backend not in ["numpy_nullable", "pyarrow"]:
+            raise ValueError(
+                f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and "
+                f"'pyarrow' are allowed.",
+            )
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a5efbbb09c1e5085bf0564d819be345edd8c827
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__init__.py
@@ -0,0 +1,579 @@
+# Vendored from https://github.com/pypa/packaging/blob/main/packaging/_structures.py
+# and https://github.com/pypa/packaging/blob/main/packaging/version.py
+# changeset ae891fd74d6dd4c6063bb04f2faeadaac6fc6313
+# 04/30/2021
+
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. Licence at LICENSES/PACKAGING_LICENSE
+from __future__ import annotations
+
+import collections
+from collections.abc import Iterator
+import itertools
+import re
+from typing import (
+    Callable,
+    SupportsInt,
+    Tuple,
+    Union,
+)
+import warnings
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+class InfinityType:
+    def __repr__(self) -> str:
+        return "Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return False
+
+    def __le__(self, other: object) -> bool:
+        return False
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, type(self))
+
+    def __ne__(self, other: object) -> bool:
+        return not isinstance(other, type(self))
+
+    def __gt__(self, other: object) -> bool:
+        return True
+
+    def __ge__(self, other: object) -> bool:
+        return True
+
+    def __neg__(self: object) -> NegativeInfinityType:
+        return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+    def __repr__(self) -> str:
+        return "-Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return True
+
+    def __le__(self, other: object) -> bool:
+        return True
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, type(self))
+
+    def __ne__(self, other: object) -> bool:
+        return not isinstance(other, type(self))
+
+    def __gt__(self, other: object) -> bool:
+        return False
+
+    def __ge__(self, other: object) -> bool:
+        return False
+
+    def __neg__(self: object) -> InfinityType:
+        return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
+
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+    NegativeInfinityType,
+    tuple[
+        Union[
+            SubLocalType,
+            tuple[SubLocalType, str],
+            tuple[NegativeInfinityType, SubLocalType],
+        ],
+        ...,
+    ],
+]
+CmpKey = tuple[
+    int, tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = tuple[int, tuple[str, ...]]
+VersionComparisonMethod = Callable[
+    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
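+# Illustrative sketch (not part of the vendored module): the two sentinel
+# singletons above compare against any other value, which lets the comparison
+# keys below stand in for "missing" version segments.  For example:
+#
+#     >>> Infinity > (1, "post") and NegativeInfinity < (0, "dev")
+#     True
+#     >>> -Infinity is NegativeInfinity
+#     True
+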
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> LegacyVersion | Version:
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on whether the given version
+    is a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+
+    Examples
+    --------
+    >>> pd.util.version.Version('1.')
+    Traceback (most recent call last):
+    InvalidVersion: Invalid version: '1.'
+    """
+
+
+class _BaseVersion:
+    _key: CmpKey | LegacyCmpKey
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version: str) -> None:
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release.",
+            DeprecationWarning,
+        )
+
+    def __str__(self) -> str:
+        return self._version
+
+    def __repr__(self) -> str:
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self) -> str:
+        return self._version
+
+    @property
+    def base_version(self) -> str:
+        return self._version
+
+    @property
+    def epoch(self) -> int:
+        return -1
+
+    @property
+    def release(self) -> None:
+        return None
+
+    @property
+    def pre(self) -> None:
+        return None
+
+    @property
+    def post(self) -> None:
+        return None
+
+    @property
+    def dev(self) -> None:
+        return None
+
+    @property
+    def local(self) -> None:
+        return None
+
+    @property
+    def is_prerelease(self) -> bool:
+        return False
+
+    @property
+    def is_postrelease(self) -> bool:
+        return False
+
+    @property
+    def is_devrelease(self) -> bool:
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+    for part in _legacy_version_component_re.split(s):
+        mapped_part = _legacy_version_replacement_map.get(part, part)
+
+        if not mapped_part or mapped_part == ".":
+            continue
+
+        if mapped_part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield mapped_part.zfill(8)
+        else:
+            yield "*" + mapped_part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0, so this effectively sorts every LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools,
+    # prior to its adoption of the packaging library.
+    parts: list[str] = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+
+    return epoch, tuple(parts)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
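+# Illustrative sketch (not part of the vendored module): compiled the same
+# way the ``Version`` class does below, the pattern splits a version string
+# into named groups (``_pat`` and ``_m`` are just example names):
+#
+#     >>> _pat = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+#     >>> _m = _pat.search("1!2.3.4rc1.post2.dev3+ubuntu.1")
+#     >>> (_m.group("epoch"), _m.group("release"), _m.group("pre_l"),
+#     ...  _m.group("post_n2"), _m.group("dev_n"), _m.group("local"))
+#     ('1', '2.3.4', 'rc', '2', '3', 'ubuntu.1')
+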
+
+class Version(_BaseVersion):
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join([str(x) for x in self.pre]))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
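+    # Illustrative note (not part of the vendored module): because ``__str__``
+    # re-assembles the canonical PEP 440 form, equivalent spellings normalize
+    # to a single representation, e.g.:
+    #
+    #     >>> str(Version("1.0.0-alpha.1"))
+    #     '1.0.0a1'
+    #     >>> str(Version("1.0-post2"))
+    #     '1.0.post2'
+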
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        _release: tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        _pre: tuple[str, int] | None = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> int | None:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        if self._version.local:
+            return ".".join([str(x) for x in self._version.local])
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
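+# Illustrative usage sketch (not part of the vendored module):
+#
+#     >>> v = Version("1.2.3rc1+local.7")
+#     >>> v.release, v.pre, v.local, v.is_prerelease
+#     ((1, 2, 3), ('rc', 1), 'local.7', True)
+#     >>> sorted(Version(s) for s in ["1.0", "1.0.dev0", "1.0a1", "1.0.post1"])
+#     [<Version('1.0.dev0')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0.post1')>]
+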
+
+def _parse_letter_version(
+    letter: str, number: str | bytes | SupportsInt
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
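+# Illustrative note (not part of the vendored module): a few of the
+# normalizations performed above:
+#
+#     >>> _parse_letter_version("alpha", None)
+#     ('a', 0)
+#     >>> _parse_letter_version("rev", "4")
+#     ('post', 4)
+#     >>> _parse_letter_version(None, "1")  # implicit post release, e.g. "1.0-1"
+#     ('post', 1)
+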
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
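+# Illustrative note (not part of the vendored module): separators may be any
+# of ".", "_" or "-", and alphabetic parts are lowercased:
+#
+#     >>> _parse_local_version("Ubuntu-1.TWELVE")
+#     ('ubuntu', 1, 'twelve')
+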
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: tuple[SubLocalType] | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then reverse the
+    # remainder back into the correct order and make it a tuple to use
+    # as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
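+
+
+# Illustrative note (not part of the vendored module): the sentinel
+# substitutions above yield the PEP 440 ordering, with trailing zeros in the
+# release segment ignored:
+#
+#     >>> Version("1.0.dev0") < Version("1.0a0") < Version("1.0") < Version("1.0+x")
+#     True
+#     >>> Version("1.0.0") == Version("1.0")
+#     True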
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49627fa88c4b2c7be43cead3cb24965712efe52c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc differ