diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e54600b5d69bc7a11e1dde6e0b537b097afd68f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_arithmetic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b22d7694542e962875b676f98e32eb094cb40ba3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_astype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fcecfb243d3d95b284436402f7b9b3e724852dc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_astype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_comparison.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_comparison.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fbf31544f8db75f5bdc4fbee2ed838b33451087 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_comparison.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_construction.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd5b8e50bb90e3eb97c80d63cfac99ad8c4b1bd9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_construction.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_function.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd7dea0ee3fb6d7f4b8fbec0cd602a1a73fb32a5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_function.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_indexing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a499ec9a52a61c42ca3fa8b3ab1e1eb9a7d55598 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_logical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_logical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0e6282790ea8acf5158d82f28cafdd2a0634c11 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_logical.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e16032393d695bc4fe06b320beb5e2e84c0ce3f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_ops.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_repr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..417e32541137b0e6994499f77c399609bc16c2d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__pycache__/test_repr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08a4268eb9d3d97a1eb226339adf345d7bc6e647 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..02575f1d8feb4f24e99f5bae06c12e46e06d9cba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..983e415ed7b67fc0c0f300628b16ef891d9d760d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..856a5b3a22a95d35cc577050f52d762b065e3ddf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py @@ -0,0 +1,32 @@ +import pytest + +from pandas.compat._optional import VERSIONS + +import pandas as pd +from pandas.core.computation import expr +from pandas.core.computation.engines import ENGINES +from pandas.util.version import Version + + +def test_compat(): + # test we have compat with our version of numexpr + + from pandas.core.computation.check import NUMEXPR_INSTALLED + + ne = pytest.importorskip("numexpr") + + ver = ne.__version__ + if Version(ver) < Version(VERSIONS["numexpr"]): + assert not NUMEXPR_INSTALLED + else: + assert NUMEXPR_INSTALLED + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", expr.PARSERS) +def test_invalid_numexpr_version(engine, parser): + if engine == "numexpr": + pytest.importorskip("numexpr") + a, b = 1, 2 # noqa: F841 + res = pd.eval("a + b", engine=engine, parser=parser) + assert res == 3 diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..17630f14b08c79567cf5cb8febd4cac84968a7d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py @@ -0,0 +1,1991 @@ +from __future__ import annotations + +from functools import reduce +from itertools import product +import operator + +import numpy as np +import pytest + +from pandas.compat import PY312 +from pandas.errors import ( + NumExprClobberingError, + PerformanceWarning, + UndefinedVariableError, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import ( + is_bool, + is_float, + is_list_like, + is_scalar, +) + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.computation import ( + expr, + pytables, +) +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + BaseExprVisitor, + PandasExprVisitor, + PythonExprVisitor, +) +from pandas.core.computation.expressions import ( + NUMEXPR_INSTALLED, + USE_NUMEXPR, +) +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + SPECIAL_CASE_ARITH_OPS_SYMS, + _binary_math_ops, + _binary_ops_dict, + _unary_math_ops, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + + +@pytest.fixture( + params=( + pytest.param( + engine, + marks=[ + pytest.mark.skipif( + engine == "numexpr" and not USE_NUMEXPR, + reason=f"numexpr enabled->{USE_NUMEXPR}, " + f"installed->{NUMEXPR_INSTALLED}", + ), + td.skip_if_no("numexpr"), + ], + ) + for engine in ENGINES + ) +) +def engine(request): + return request.param + + +@pytest.fixture(params=expr.PARSERS) +def parser(request): + return request.param + + +def _eval_single_bin(lhs, cmp1, rhs, engine): + c = 
_binary_ops_dict[cmp1] + if ENGINES[engine].has_neg_frac: + try: + return c(lhs, rhs) + except ValueError as e: + if str(e).startswith( + "negative number cannot be raised to a fractional power" + ): + return np.nan + raise + return c(lhs, rhs) + + +# TODO: using range(5) here is a kludge +@pytest.fixture( + params=list(range(5)), + ids=["DataFrame", "Series", "SeriesNaN", "DataFrameNaN", "float"], +) +def lhs(request): + nan_df1 = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + nan_df1[nan_df1 > 0.5] = np.nan + + opts = ( + DataFrame(np.random.default_rng(2).standard_normal((10, 5))), + Series(np.random.default_rng(2).standard_normal(5)), + Series([1, 2, np.nan, np.nan, 5]), + nan_df1, + np.random.default_rng(2).standard_normal(), + ) + return opts[request.param] + + +rhs = lhs +midhs = lhs + + +@pytest.fixture +def idx_func_dict(): + return { + "i": lambda n: Index(np.arange(n), dtype=np.int64), + "f": lambda n: Index(np.arange(n), dtype=np.float64), + "s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]), + "dt": lambda n: date_range("2020-01-01", periods=n), + "td": lambda n: timedelta_range("1 day", periods=n), + "p": lambda n: period_range("2020-01-01", periods=n, freq="D"), + } + + +class TestEval: + @pytest.mark.parametrize( + "cmp1", + ["!=", "==", "<=", ">=", "<", ">"], + ids=["ne", "eq", "le", "ge", "lt", "gt"], + ) + @pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"]) + @pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS) + def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser): + if parser == "python" and binop in ["and", "or"]: + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, rhs, engine) + rhs_new = _eval_single_bin(lhs, cmp2, rhs, engine) + expected = _eval_single_bin(lhs_new, binop, rhs_new, 
engine) + + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS) + def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser): + lhs = lhs < 0 + rhs = rhs < 0 + + if parser == "python" and cmp_op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + ex = f"lhs {cmp_op} rhs" + pd.eval(ex, engine=engine, parser=parser) + return + + ex = f"lhs {cmp_op} rhs" + msg = "|".join( + [ + r"only list-like( or dict-like)? objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')bool(`|')", + "argument of type 'bool' is not iterable", + ] + ) + if cmp_op in ("in", "not in") and not is_list_like(rhs): + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + expected = _eval_single_bin(lhs, cmp_op, rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("op", expr.CMP_OPS_SYMS) + def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser): + if parser == "python" and op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"~(lhs {op} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + if ( + is_float(lhs) + and not is_float(rhs) + and op in ["in", "not in"] + and engine == "python" + and parser == "pandas" + ): + mark = pytest.mark.xfail( + reason="Looks like expected is negative, unclear whether " + "expected is incorrect or result is incorrect" + ) + request.applymarker(mark) + skip_these = ["in", "not in"] + ex = f"~(lhs {op} rhs)" + + msg = "|".join( + [ + r"only list-like( or dict-like)? 
objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')float(`|')", + "argument of type 'float' is not iterable", + ] + ) + if is_scalar(rhs) and op in skip_these: + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + # compound + if is_scalar(lhs) and is_scalar(rhs): + lhs, rhs = (np.array([x]) for x in (lhs, rhs)) + expected = _eval_single_bin(lhs, op, rhs, engine) + if is_scalar(expected): + expected = not expected + else: + expected = ~expected + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + @pytest.mark.parametrize("cmp1", ["<", ">"]) + @pytest.mark.parametrize("cmp2", ["<", ">"]) + def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs, engine, parser): + mid = midhs + if parser == "python": + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex1, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, mid, engine) + rhs_new = _eval_single_bin(mid, cmp2, rhs, engine) + + if lhs_new is not None and rhs_new is not None: + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs" + ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)" + expected = _eval_single_bin(lhs_new, "&", rhs_new, engine) + + for ex in (ex1, ex2, ex3): + result = pd.eval(ex, engine=engine, parser=parser) + + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "arith1", sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS)) + ) + def test_binary_arith_ops(self, arith1, lhs, rhs, engine, parser): + ex = f"lhs {arith1} rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = _eval_single_bin(lhs, arith1, rhs, engine) + + tm.assert_almost_equal(result, expected) + ex = f"lhs {arith1} rhs {arith1} rhs" + result = pd.eval(ex, 
engine=engine, parser=parser) + nlhs = _eval_single_bin(lhs, arith1, rhs, engine) + try: + nlhs, ghs = nlhs.align(rhs) + except (ValueError, TypeError, AttributeError): + # ValueError: series frame or frame series align + # TypeError, AttributeError: series or frame with scalar align + return + else: + if engine == "numexpr": + import numexpr as ne + + # direct numpy comparison + expected = ne.evaluate(f"nlhs {arith1} ghs") + # Update assert statement due to unreliable numerical + # precision component (GH37328) + # TODO: update testing code so that assert_almost_equal statement + # can be replaced again by the assert_numpy_array_equal statement + tm.assert_almost_equal(result.values, expected) + else: + expected = eval(f"nlhs {arith1} ghs") + tm.assert_almost_equal(result, expected) + + # modulus, pow, and floor division require special casing + + def test_modulus(self, lhs, rhs, engine, parser): + ex = r"lhs % rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = lhs % rhs + tm.assert_almost_equal(result, expected) + + if engine == "numexpr": + import numexpr as ne + + expected = ne.evaluate(r"expected % rhs") + if isinstance(result, (DataFrame, Series)): + tm.assert_almost_equal(result.values, expected) + else: + tm.assert_almost_equal(result, expected.item()) + else: + expected = _eval_single_bin(expected, "%", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_floor_division(self, lhs, rhs, engine, parser): + ex = "lhs // rhs" + + if engine == "python": + res = pd.eval(ex, engine=engine, parser=parser) + expected = lhs // rhs + tm.assert_equal(res, expected) + else: + msg = ( + r"unsupported operand type\(s\) for //: 'VariableNode' and " + "'VariableNode'" + ) + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + local_dict={"lhs": lhs, "rhs": rhs}, + engine=engine, + parser=parser, + ) + + @td.skip_if_windows + def test_pow(self, lhs, rhs, engine, parser): + # odd failure on win32 platform, so skip + ex = "lhs ** 
rhs" + expected = _eval_single_bin(lhs, "**", rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + + if ( + is_scalar(lhs) + and is_scalar(rhs) + and isinstance(expected, (complex, np.complexfloating)) + and np.isnan(result) + ): + msg = "(DataFrame.columns|numpy array) are different" + with pytest.raises(AssertionError, match=msg): + tm.assert_numpy_array_equal(result, expected) + else: + tm.assert_almost_equal(result, expected) + + ex = "(lhs ** rhs) ** rhs" + result = pd.eval(ex, engine=engine, parser=parser) + + middle = _eval_single_bin(lhs, "**", rhs, engine) + expected = _eval_single_bin(middle, "**", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_check_single_invert_op(self, lhs, engine, parser): + # simple + try: + elb = lhs.astype(bool) + except AttributeError: + elb = np.array([bool(lhs)]) + expected = ~elb + result = pd.eval("~elb", engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + def test_frame_invert(self, engine, parser): + expr = "~lhs" + + # ~ ## + # frame + # float always raises + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool always works + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + expect = ~lhs + 
result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # object raises + lhs = DataFrame( + {"b": ["a", 1, 2.0], "c": np.random.default_rng(2).standard_normal(3) > 0.5} + ) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_series_invert(self, engine, parser): + # ~ #### + expr = "~lhs" + + # series + # float raises + lhs = Series(np.random.default_rng(2).standard_normal(5)) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + result = pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # float + # int + # bool + + # object + lhs = Series(["a", 1, 2.0]) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_frame_negate(self, engine, parser): + expr = 
"-lhs" + + # float + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # int + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + def test_series_negate(self, engine, parser): + expr = "-lhs" + + # float + lhs = Series(np.random.default_rng(2).standard_normal(5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # int + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + DataFrame(np.random.default_rng(2).standard_normal((5, 2))), + # Int + DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))), + # bool doesn't work with numexpr but works elsewhere + DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5), + ], + ) + def 
test_frame_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + Series(np.random.default_rng(2).standard_normal(5)), + # Int + Series(np.random.default_rng(2).integers(5, size=5)), + # bool doesn't work with numexpr but works elsewhere + Series(np.random.default_rng(2).standard_normal(5) > 0.5), + ], + ) + def test_series_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + def test_scalar_unary(self, engine, parser): + msg = "bad operand type for unary ~: 'float'" + warn = None + if PY312 and not (engine == "numexpr" and parser == "pandas"): + warn = DeprecationWarning + with pytest.raises(TypeError, match=msg): + pd.eval("~1.0", engine=engine, parser=parser) + + assert pd.eval("-1.0", parser=parser, engine=engine) == -1.0 + assert pd.eval("+1.0", parser=parser, engine=engine) == +1.0 + assert pd.eval("~1", parser=parser, engine=engine) == ~1 + assert pd.eval("-1", parser=parser, engine=engine) == -1 + assert pd.eval("+1", parser=parser, engine=engine) == +1 + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~True", parser=parser, engine=engine) == ~True + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~False", parser=parser, engine=engine) == ~False + assert pd.eval("-True", parser=parser, engine=engine) == -True + assert pd.eval("-False", parser=parser, engine=engine) == -False + assert pd.eval("+True", parser=parser, engine=engine) == +True + assert pd.eval("+False", parser=parser, engine=engine) == +False + + def test_unary_in_array(self): + # GH 11235 + # TODO: 2022-01-29: result return list with numexpr 2.7.3 in CI + # but cannot reproduce locally + result = 
np.array( + pd.eval("[-True, True, +True, -False, False, +False, -37, 37, ~37, +37]"), + dtype=np.object_, + ) + expected = np.array( + [ + -True, + True, + +True, + -False, + False, + +False, + -37, + 37, + ~37, + +37, + ], + dtype=np.object_, + ) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.float32, np.float64]) + @pytest.mark.parametrize("expr", ["x < -0.1", "-5 > x"]) + def test_float_comparison_bin_op(self, dtype, expr): + # GH 16363 + df = DataFrame({"x": np.array([0], dtype=dtype)}) + res = df.eval(expr) + assert res.values == np.array([False]) + + def test_unary_in_function(self): + # GH 46471 + df = DataFrame({"x": [0, 1, np.nan]}) + + result = df.eval("x.fillna(-1)") + expected = df.x.fillna(-1) + # column name becomes None if using numexpr + # only check names when the engine is not numexpr + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + result = df.eval("x.shift(1, fill_value=-1)") + expected = df.x.shift(1, fill_value=-1) + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + @pytest.mark.parametrize( + "ex", + ( + "1 or 2", + "1 and 2", + "a and b", + "a or b", + "1 or 2 and (3 + 2) > 3", + "2 * x > 2 or 1 and 2", + "2 * df > 3 and 1 or a", + ), + ) + def test_disallow_scalar_bool_ops(self, ex, engine, parser): + x, a, b = np.random.default_rng(2).standard_normal(3), 1, 2 # noqa: F841 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) # noqa: F841 + + msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + def test_identical(self, engine, parser): + # see gh-10546 + x = 1 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1 + assert is_scalar(result) + + x = 1.5 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1.5 + assert is_scalar(result) + + x = False + result = pd.eval("x", 
engine=engine, parser=parser) + assert not result + assert is_bool(result) + assert is_scalar(result) + + x = np.array([1]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1])) + assert result.shape == (1,) + + x = np.array([1.5]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1.5])) + assert result.shape == (1,) + + x = np.array([False]) # noqa: F841 + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([False])) + assert result.shape == (1,) + + def test_line_continuation(self, engine, parser): + # GH 11149 + exp = """1 + 2 * \ + 5 - 1 + 2 """ + result = pd.eval(exp, engine=engine, parser=parser) + assert result == 12 + + def test_float_truncation(self, engine, parser): + # GH 14241 + exp = "1000000000.006" + result = pd.eval(exp, engine=engine, parser=parser) + expected = np.float64(exp) + assert result == expected + + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + cutoff = 1000000000.0006 + result = df.query(f"A < {cutoff:.4f}") + assert result.empty + + cutoff = 1000000000.0010 + result = df.query(f"A > {cutoff:.4f}") + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = df.query(f"A == {exact:.4f}") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + def test_disallow_python_keywords(self): + # GH 18221 + df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"]) + msg = "Python keyword not valid identifier in numexpr query" + with pytest.raises(SyntaxError, match=msg): + df.query("class == 0") + + df = DataFrame() + df.index.name = "lambda" + with pytest.raises(SyntaxError, match=msg): + df.query("lambda == 0") + + def test_true_false_logic(self): + # GH 25823 + # This behavior is deprecated in Python 3.12 + with tm.maybe_produces_warning( + DeprecationWarning, PY312, check_stacklevel=False + ): 
+ assert pd.eval("not True") == -2 + assert pd.eval("not False") == -1 + assert pd.eval("True and not True") == 0 + + def test_and_logic_string_match(self): + # GH 25823 + event = Series({"a": "hello"}) + assert pd.eval(f"{event.str.match('hello').a}") + assert pd.eval(f"{event.str.match('hello').a and event.str.match('hello').a}") + + +# ------------------------------------- +# gh-12388: Typecasting rules consistency with python + + +class TestTypeCasting: + @pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"]) + # maybe someday... numexpr has too many upcasting rules now + # chain(*(np.core.sctypes[x] for x in ['uint', 'int', 'float'])) + @pytest.mark.parametrize("dt", [np.float32, np.float64]) + @pytest.mark.parametrize("left_right", [("df", "3"), ("3", "df")]) + def test_binop_typecasting(self, engine, parser, op, dt, left_right): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)), dtype=dt) + left, right = left_right + s = f"{left} {op} {right}" + res = pd.eval(s, engine=engine, parser=parser) + assert df.values.dtype == dt + assert res.values.dtype == dt + tm.assert_frame_equal(res, eval(s)) + + +# ------------------------------------- +# Basic and complex alignment + + +def should_warn(*args): + not_mono = not any(map(operator.attrgetter("is_monotonic_increasing"), args)) + only_one_dt = reduce( + operator.xor, (issubclass(x.dtype.type, np.datetime64) for x in args) + ) + return not_mono and only_one_dt + + +class TestAlignment: + index_types = ["i", "s", "dt"] + lhs_index_types = index_types + ["s"] # 'p' + + def test_align_nested_unary_op(self, engine, parser): + s = "df * ~2" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + res = pd.eval(s, engine=engine, parser=parser) + tm.assert_frame_equal(res, df * ~2) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("lr_idx_type", lhs_index_types) + @pytest.mark.parametrize("rr_idx_type", index_types) + @pytest.mark.parametrize("c_idx_type", 
index_types) + def test_basic_frame_alignment( + self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[lr_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((20, 10)), + index=idx_func_dict[rr_idx_type](20), + columns=idx_func_dict[c_idx_type](10), + ) + # only warns if not monotonic and not sortable + if should_warn(df.index, df2.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2", engine=engine, parser=parser) + else: + res = pd.eval("df + df2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2) + + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("c_idx_type", lhs_index_types) + def test_frame_comparison( + self, engine, parser, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + res = pd.eval("df < 2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < 2) + + df3 = DataFrame( + np.random.default_rng(2).standard_normal(df.shape), + index=df.index, + columns=df.columns, + ) + res = pd.eval("df < df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < df3) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + @pytest.mark.parametrize("c2", index_types) + def test_medium_complex_frame_alignment( + self, engine, parser, r1, c1, r2, c2, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 2)), + index=idx_func_dict[r1](3), + columns=idx_func_dict[c1](2), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + 
index=idx_func_dict[r2](4), + columns=idx_func_dict[c2](2), + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), + index=idx_func_dict[r2](5), + columns=idx_func_dict[c2](2), + ) + if should_warn(df.index, df2.index, df3.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + else: + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2 + df3) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + def test_basic_frame_series_alignment( + self, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + s", engine=engine, parser=parser) + else: + res = pd.eval("df + s", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else df + s + else: + expected = df + s + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize( + "r_idx_type, c_idx_type", + list(product(["i", "s"], ["i", "s"])) + [("dt", "dt")], + ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_basic_series_frame_alignment( + self, request, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict + ): + if ( + engine == "numexpr" + and parser in ("pandas", "python") + and index_name == "index" + and r_idx_type == "i" + and c_idx_type == "s" + ): + 
reason = ( + f"Flaky column ordering when engine={engine}, " + f"parser={parser}, index_name={index_name}, " + f"r_idx_type={r_idx_type}, c_idx_type={c_idx_type}" + ) + request.applymarker(pytest.mark.xfail(reason=reason, strict=False)) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 7)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](7), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + if should_warn(s.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("s + df", engine=engine, parser=parser) + else: + res = pd.eval("s + df", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else s + df + else: + expected = s + df + tm.assert_frame_equal(res, expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("op", ["+", "*"]) + def test_series_frame_commutativity( + self, engine, parser, index_name, op, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + lhs = f"s {op} df" + rhs = f"df {op} s" + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + a = pd.eval(lhs, engine=engine, parser=parser) + with tm.assert_produces_warning(RuntimeWarning): + b = pd.eval(rhs, engine=engine, parser=parser) + else: + a = pd.eval(lhs, engine=engine, parser=parser) + b = pd.eval(rhs, engine=engine, parser=parser) + + if r_idx_type != "dt" and c_idx_type != "dt": + if engine == "numexpr": + 
tm.assert_frame_equal(a, b) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + @pytest.mark.parametrize("c2", index_types) + def test_complex_series_frame_alignment( + self, engine, parser, r1, c1, r2, c2, idx_func_dict + ): + n = 3 + m1 = 5 + m2 = 2 * m1 + df = DataFrame( + np.random.default_rng(2).standard_normal((m1, n)), + index=idx_func_dict[r1](m1), + columns=idx_func_dict[c1](n), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((m2, n)), + index=idx_func_dict[r2](m2), + columns=idx_func_dict[c2](n), + ) + index = df2.columns + ser = Series(np.random.default_rng(2).standard_normal(n), index[:n]) + + if r2 == "dt" or c2 == "dt": + if engine == "numexpr": + expected2 = df2.add(ser) + else: + expected2 = df2 + ser + else: + expected2 = df2 + ser + + if r1 == "dt" or c1 == "dt": + if engine == "numexpr": + expected = expected2.add(df) + else: + expected = expected2 + df + else: + expected = expected2 + df + + if should_warn(df2.index, ser.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + else: + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + assert res.shape == expected.shape + tm.assert_frame_equal(res, expected) + + def test_performance_warning_for_poor_alignment(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + if engine == "numexpr": + seen = PerformanceWarning + else: + seen = False + + with tm.assert_produces_warning(seen): + pd.eval("df + s", engine=engine, parser=parser) + + s = Series(np.random.default_rng(2).standard_normal(1000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = 
DataFrame(np.random.default_rng(2).standard_normal((10, 10000))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + + is_python_engine = engine == "python" + + if not is_python_engine: + wrn = PerformanceWarning + else: + wrn = False + + with tm.assert_produces_warning(wrn) as w: + pd.eval("df + s", engine=engine, parser=parser) + + if not is_python_engine: + assert len(w) == 1 + msg = str(w[0].message) + logged = np.log10(s.size - df.shape[1]) + expected = ( + f"Alignment difference on axis 1 is larger " + f"than an order of magnitude on term 'df', " + f"by more than {logged:.4g}; performance may suffer." + ) + assert msg == expected + + +# ------------------------------------ +# Slightly more complex ops + + +class TestOperations: + def eval(self, *args, **kwargs): + kwargs["level"] = kwargs.pop("level", 0) + 1 + return pd.eval(*args, **kwargs) + + def test_simple_arith_ops(self, engine, parser): + exclude_arith = [] + if parser == "python": + exclude_arith = ["in", "not in"] + + arith_ops = [ + op + for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS + if op not in exclude_arith + ] + + ops = (op for op in arith_ops if op != "//") + + for op in ops: + ex = f"1 {op} 1" + ex2 = f"x {op} 1" + ex3 = f"1 {op} (x + 1)" + + if op in ("in", "not in"): + msg = "argument of type 'int' is not iterable" + with pytest.raises(TypeError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + else: + expec = _eval_single_bin(1, op, 1, engine) + x = self.eval(ex, engine=engine, parser=parser) + assert x == expec + + expec = _eval_single_bin(x, op, 1, engine) + y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser) + assert y == expec + + expec = _eval_single_bin(1, op, x + 1, engine) + y = self.eval(ex3, local_dict={"x": x}, 
engine=engine, parser=parser) + assert y == expec + + @pytest.mark.parametrize("rhs", [True, False]) + @pytest.mark.parametrize("lhs", [True, False]) + @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS) + def test_simple_bool_ops(self, rhs, lhs, op): + ex = f"{lhs} {op} {rhs}" + + if parser == "python" and op in ["and", "or"]: + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + self.eval(ex) + return + + res = self.eval(ex) + exp = eval(ex) + assert res == exp + + @pytest.mark.parametrize("rhs", [True, False]) + @pytest.mark.parametrize("lhs", [True, False]) + @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS) + def test_bool_ops_with_constants(self, rhs, lhs, op): + ex = f"{lhs} {op} {rhs}" + + if parser == "python" and op in ["and", "or"]: + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + self.eval(ex) + return + + res = self.eval(ex) + exp = eval(ex) + assert res == exp + + def test_4d_ndarray_fails(self): + x = np.random.default_rng(2).standard_normal((3, 4, 5, 6)) + y = Series(np.random.default_rng(2).standard_normal(10)) + msg = "N-dimensional objects, where N > 2, are not supported with eval" + with pytest.raises(NotImplementedError, match=msg): + self.eval("x + y", local_dict={"x": x, "y": y}) + + def test_constant(self): + x = self.eval("1") + assert x == 1 + + def test_single_variable(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df2 = self.eval("df", local_dict={"df": df}) + tm.assert_frame_equal(df, df2) + + def test_failing_subscript_with_name_error(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841 + with pytest.raises(NameError, match="name 'x' is not defined"): + self.eval("df[x > 2] > 2") + + def test_lhs_expression_subscript(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + result = self.eval("(df + 1)[df > 2]", local_dict={"df": df}) + expected = (df + 
1)[df > 2] + tm.assert_frame_equal(result, expected) + + def test_attr_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc") + ) + expr1 = "df.a < df.b" + expec1 = df.a < df.b + expr2 = "df.a + df.b + df.c" + expec2 = df.a + df.b + df.c + expr3 = "df.a + df.b + df.c[df.b < 0]" + expec3 = df.a + df.b + df.c[df.b < 0] + exprs = expr1, expr2, expr3 + expecs = expec1, expec2, expec3 + for e, expec in zip(exprs, expecs): + tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df})) + + def test_assignment_fails(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc") + ) + df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + expr1 = "df = df2" + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + self.eval(expr1, local_dict={"df": df, "df2": df2}) + + def test_assignment_column_multiple_raise(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # multiple assignees + with pytest.raises(SyntaxError, match="invalid syntax"): + df.eval("d c = a + b") + + def test_assignment_column_invalid_assign(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # invalid assignees + msg = "left hand side of an assignment must be a single name" + with pytest.raises(SyntaxError, match=msg): + df.eval("d,c = a + b") + + def test_assignment_column_invalid_assign_function_call(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + msg = "cannot assign to function call" + with pytest.raises(SyntaxError, match=msg): + df.eval('Timestamp("20131001") = a + b') + + def test_assignment_single_assign_existing(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # single assignment - existing variable + expected = df.copy() + expected["a"] = expected["a"] 
+ expected["b"] + df.eval("a = a + b", inplace=True) + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_new(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # single assignment - new variable + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + df.eval("c = a + b", inplace=True) + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_local_overlap(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + df = df.copy() + a = 1 # noqa: F841 + df.eval("a = 1 + b", inplace=True) + + expected = df.copy() + expected["a"] = 1 + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_name(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + + a = 1 # noqa: F841 + old_a = df.a.copy() + df.eval("a = a + b", inplace=True) + result = old_a + df.b + tm.assert_series_equal(result, df.a, check_names=False) + assert result.name is None + + def test_assignment_multiple_raises(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # multiple assignment + df.eval("c = a + b", inplace=True) + msg = "can only assign a single expression" + with pytest.raises(SyntaxError, match=msg): + df.eval("c = a = b") + + def test_assignment_explicit(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # explicit targets + self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True) + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_column_in(self): + # GH 11235 + df = DataFrame({"a": [11], "b": [-32]}) + result = df.eval("a in [11, -32]") + expected = Series([True]) + # TODO: 2022-01-29: Name check failed with numexpr 2.7.3 in CI + # but cannot reproduce locally + 
tm.assert_series_equal(result, expected, check_names=False) + + @pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.") + def test_assignment_not_inplace(self): + # see gh-9297 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + + actual = df.eval("c = a + b", inplace=False) + assert actual is not None + + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_multi_line_expression(self, warn_copy_on_write): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + answer = df.eval( + """ + c = a + b + d = c + b""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + answer = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + # multi-line not valid if not all assignments + msg = "Multi-line expressions are only valid if all expressions contain" + with pytest.raises(ValueError, match=msg): + df.eval( + """ + a = b + 2 + b - 2""", + inplace=False, + ) + + def test_multi_line_expression_not_inplace(self): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + df = df.eval( + """ + c = a + b + d = c + b""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + df = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + def test_multi_line_expression_local_variable(self): + # GH 15342 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + local_var = 7 + 
expected["c"] = expected["a"] * local_var + expected["d"] = expected["c"] + local_var + answer = df.eval( + """ + c = a * @local_var + d = c + @local_var + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(1, 7) + expected["d"] = expected["c"] + local_func(1, 7) + answer = df.eval( + """ + c = a * @local_func(1, 7) + d = c + @local_func(1, 7) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable_with_kwargs(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(b=7, a=1) + expected["d"] = expected["c"] + local_func(b=7, a=1) + answer = df.eval( + """ + c = a * @local_func(b=7, a=1) + d = c + @local_func(b=7, a=1) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_assignment_in_query(self): + # GH 8664 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + df.query("a = 1") + tm.assert_frame_equal(df, df_orig) + + def test_query_inplace(self): + # see gh-11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + expected = expected[expected["a"] == 2] + df.query("a == 2", inplace=True) + tm.assert_frame_equal(expected, df) + + df = {} + expected = {"a": 3} + + self.eval("a = 1 + 2", target=df, inplace=True) + tm.assert_dict_equal(df, expected) + + @pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)]) + def test_cannot_item_assign(self, invalid_target): + msg = "Cannot assign 
expression output to target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=True) + + if hasattr(invalid_target, "copy"): + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)]) + def test_cannot_copy_item(self, invalid_target): + msg = "Cannot return a copy of the target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}]) + def test_inplace_no_assignment(self, target): + expression = "1 + 2" + + assert self.eval(expression, target=target, inplace=False) == 3 + + msg = "Cannot operate inplace if there is no assignment" + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=target, inplace=True) + + def test_basic_period_index_boolean_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + e = df < 2 + r = self.eval("df < 2", local_dict={"df": df}) + x = df < 2 + + tm.assert_frame_equal(r, e) + tm.assert_frame_equal(x, e) + + def test_basic_period_index_subscript_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + r = self.eval("df[df < 2 + 3]", local_dict={"df": df}) + e = df[df < 2 + 3] + tm.assert_frame_equal(r, e) + + def test_nested_period_index_subscript_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df}) + e = df[df[df < 2] < 2] + df * 2 + tm.assert_frame_equal(r, e) + + def test_date_boolean(self, engine, 
parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + res = self.eval( + "df.dates1 < 20130101", + local_dict={"df": df}, + engine=engine, + parser=parser, + ) + expec = df.dates1 < "20130101" + tm.assert_series_equal(res, expec, check_names=False) + + def test_simple_in_ops(self, engine, parser): + if parser != "python": + res = pd.eval("1 in [1, 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("2 in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("3 in (1, 2)", engine=engine, parser=parser) + assert not res + + res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) not in [(3,), 2]", engine=engine, parser=parser) + assert not res + + res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser) + assert res + else: + msg = "'In' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval("1 in [1, 2]", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("2 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser) + msg = "'NotIn' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser) + + def 
test_check_many_exprs(self, engine, parser): + a = 1 # noqa: F841 + expr = " * ".join("a" * 33) + expected = 1 + res = pd.eval(expr, engine=engine, parser=parser) + assert res == expected + + @pytest.mark.parametrize( + "expr", + [ + "df > 2 and df > 3", + "df > 2 or df > 3", + "not df > 2", + ], + ) + def test_fails_and_or_not(self, expr, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + if parser == "python": + msg = "'BoolOp' nodes are not implemented" + if "not" in expr: + msg = "'Not' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + else: + # smoke-test, should not raise + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + + @pytest.mark.parametrize("char", ["|", "&"]) + def test_fails_ampersand_pipe(self, char, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841 + ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)" + if parser == "python": + msg = "cannot evaluate scalar only bool ops" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, parser=parser, engine=engine) + else: + # smoke-test, should not raise + pd.eval(ex, parser=parser, engine=engine) + + +class TestMath: + def eval(self, *args, **kwargs): + kwargs["level"] = kwargs.pop("level", 0) + 1 + return pd.eval(*args, **kwargs) + + @pytest.mark.skipif( + not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr" + ) + @pytest.mark.parametrize("fn", _unary_math_ops) + def test_unary_functions(self, fn): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + a = df.a + + expr = f"{fn}(a)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize("fn", _binary_math_ops) + def test_binary_functions(self, fn): + df = DataFrame( + { 
+ "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + a = df.a + b = df.b + + expr = f"{fn}(a, b)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a, b) + tm.assert_almost_equal(got, expect, check_names=False) + + def test_df_use_case(self, engine, parser): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval( + "e = arctan2(sin(a), b)", + engine=engine, + parser=parser, + inplace=True, + ) + got = df.e + expect = np.arctan2(np.sin(df.a), df.b) + tm.assert_series_equal(got, expect, check_names=False) + + def test_df_arithmetic_subexpression(self, engine, parser): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval("e = sin(a + b)", engine=engine, parser=parser, inplace=True) + got = df.e + expect = np.sin(df.a + df.b) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize( + "dtype, expect_dtype", + [ + (np.int32, np.float64), + (np.int64, np.float64), + (np.float32, np.float32), + (np.float64, np.float64), + pytest.param(np.complex128, np.complex128, marks=td.skip_if_windows), + ], + ) + def test_result_types(self, dtype, expect_dtype, engine, parser): + # xref https://github.com/pandas-dev/pandas/issues/12293 + # this fails on Windows, apparently a floating point precision issue + + # Did not test complex64 because DataFrame is converting it to + # complex128. 
Due to https://github.com/pandas-dev/pandas/issues/10952 + df = DataFrame( + {"a": np.random.default_rng(2).standard_normal(10).astype(dtype)} + ) + assert df.a.dtype == dtype + df.eval("b = sin(a)", engine=engine, parser=parser, inplace=True) + got = df.b + expect = np.sin(df.a) + assert expect.dtype == got.dtype + assert expect_dtype == got.dtype + tm.assert_series_equal(got, expect, check_names=False) + + def test_undefined_func(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = '"mysin" is not a supported function' + + with pytest.raises(ValueError, match=msg): + df.eval("mysin(a)", engine=engine, parser=parser) + + def test_keyword_arg(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = 'Function "sin" does not support keyword arguments' + + with pytest.raises(TypeError, match=msg): + df.eval("sin(x=a)", engine=engine, parser=parser) + + +_var_s = np.random.default_rng(2).standard_normal(10) + + +class TestScope: + def test_global_scope(self, engine, parser): + e = "_var_s * 2" + tm.assert_numpy_array_equal( + _var_s * 2, pd.eval(e, engine=engine, parser=parser) + ) + + def test_no_new_locals(self, engine, parser): + x = 1 + lcls = locals().copy() + pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser) + lcls2 = locals().copy() + lcls2.pop("lcls") + assert lcls == lcls2 + + def test_no_new_globals(self, engine, parser): + x = 1 # noqa: F841 + gbls = globals().copy() + pd.eval("x + 1", engine=engine, parser=parser) + gbls2 = globals().copy() + assert gbls == gbls2 + + def test_empty_locals(self, engine, parser): + # GH 47084 + x = 1 # noqa: F841 + msg = "name 'x' is not defined" + with pytest.raises(UndefinedVariableError, match=msg): + pd.eval("x + 1", engine=engine, parser=parser, local_dict={}) + + def test_empty_globals(self, engine, parser): + # GH 47084 + msg = "name '_var_s' is not defined" + e = "_var_s * 2" + with 
pytest.raises(UndefinedVariableError, match=msg): + pd.eval(e, engine=engine, parser=parser, global_dict={}) + + +@td.skip_if_no("numexpr") +def test_invalid_engine(): + msg = "Invalid engine 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf") + + +@td.skip_if_no("numexpr") +@pytest.mark.parametrize( + ("use_numexpr", "expected"), + ( + (True, "numexpr"), + (False, "python"), + ), +) +def test_numexpr_option_respected(use_numexpr, expected): + # GH 32556 + from pandas.core.computation.eval import _check_engine + + with pd.option_context("compute.use_numexpr", use_numexpr): + result = _check_engine(None) + assert result == expected + + +@td.skip_if_no("numexpr") +def test_numexpr_option_incompatible_op(): + # GH 32556 + with pd.option_context("compute.use_numexpr", False): + df = DataFrame( + {"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]} + ) + result = df.query("A.isnull()") + expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=[4, 5]) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numexpr") +def test_invalid_parser(): + msg = "Invalid parser 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf") + + +_parsers: dict[str, type[BaseExprVisitor]] = { + "python": PythonExprVisitor, + "pytables": pytables.PyTablesExprVisitor, + "pandas": PandasExprVisitor, +} + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", _parsers) +def test_disallowed_nodes(engine, parser): + VisitorClass = _parsers[parser] + inst = VisitorClass("x + 1", engine, parser) + + for ops in VisitorClass.unsupported_nodes: + msg = "nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + getattr(inst, ops)() + + +def test_syntax_error_exprs(engine, parser): + e = "s +" + with pytest.raises(SyntaxError, match="invalid syntax"): + pd.eval(e, engine=engine, 
parser=parser) + + +def test_name_error_exprs(engine, parser): + e = "s + t" + msg = "name 's' is not defined" + with pytest.raises(NameError, match=msg): + pd.eval(e, engine=engine, parser=parser) + + +@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"]) +def test_invalid_local_variable_reference(engine, parser, express): + a, b = 1, 2 # noqa: F841 + + if parser != "pandas": + with pytest.raises(SyntaxError, match="The '@' prefix is only"): + pd.eval(express, engine=engine, parser=parser) + else: + with pytest.raises(SyntaxError, match="The '@' prefix is not"): + pd.eval(express, engine=engine, parser=parser) + + +def test_numexpr_builtin_raises(engine, parser): + sin, dotted_line = 1, 2 + if engine == "numexpr": + msg = "Variables in expression .+" + with pytest.raises(NumExprClobberingError, match=msg): + pd.eval("sin + dotted_line", engine=engine, parser=parser) + else: + res = pd.eval("sin + dotted_line", engine=engine, parser=parser) + assert res == sin + dotted_line + + +def test_bad_resolver_raises(engine, parser): + cannot_resolve = 42, 3.0 + with pytest.raises(TypeError, match="Resolver of type .+"): + pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser) + + +def test_empty_string_raises(engine, parser): + # GH 13139 + with pytest.raises(ValueError, match="expr cannot be an empty string"): + pd.eval("", engine=engine, parser=parser) + + +def test_more_than_one_expression_raises(engine, parser): + with pytest.raises(SyntaxError, match="only a single expression is allowed"): + pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser) + + +@pytest.mark.parametrize("cmp", ("and", "or")) +@pytest.mark.parametrize("lhs", (int, float)) +@pytest.mark.parametrize("rhs", (int, float)) +def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): + gen = { + int: lambda: np.random.default_rng(2).integers(10), + float: np.random.default_rng(2).standard_normal, + } + + mid = gen[lhs]() # noqa: F841 + lhs = gen[lhs]() + rhs = 
gen[rhs]() + + ex1 = f"lhs {cmp} mid {cmp} rhs" + ex2 = f"lhs {cmp} mid and mid {cmp} rhs" + ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)" + for ex in (ex1, ex2, ex3): + msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + +@pytest.mark.parametrize( + "other", + [ + "'x'", + "...", + ], +) +def test_equals_various(other): + df = DataFrame({"A": ["a", "b", "c"]}, dtype=object) + result = df.eval(f"A == {other}") + expected = Series([False, False, False], name="A") + if USE_NUMEXPR: + # https://github.com/pandas-dev/pandas/issues/10239 + # lose name with numexpr engine. Remove when that's fixed. + expected.name = None + tm.assert_series_equal(result, expected) + + +def test_inf(engine, parser): + s = "inf + 1" + expected = np.inf + result = pd.eval(s, engine=engine, parser=parser) + assert result == expected + + +@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"]) +def test_query_token(engine, column): + # See: https://github.com/pandas-dev/pandas/pull/42826 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"] + ) + expected = df[df[column] > 5] + query_string = f"`{column}` > 5" + result = df.query(query_string, engine=engine) + tm.assert_frame_equal(result, expected) + + +def test_negate_lt_eq_le(engine, parser): + df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"]) + expected = df[~(df.cat > 0)] + + result = df.query("~(cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + if parser == "python": + msg = "'Not' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("not (cat > 0)", engine=engine, parser=parser) + else: + result = df.query("not (cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "column", + DEFAULT_GLOBALS.keys(), +) +def 
test_eval_no_support_column_name(request, column): + # GH 44603 + if column in ["True", "False", "inf", "Inf"]: + request.applymarker( + pytest.mark.xfail( + raises=KeyError, + reason=f"GH 47859 DataFrame eval not supported with {column}", + ) + ) + + df = DataFrame( + np.random.default_rng(2).integers(0, 100, size=(10, 2)), + columns=[column, "col1"], + ) + expected = df[df[column] > 6] + result = df.query(f"{column}>6") + + tm.assert_frame_equal(result, expected) + + +def test_set_inplace(using_copy_on_write, warn_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/47449 + # Ensure we don't only update the DataFrame inplace, but also the actual + # column values, such that references to this column also get updated + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result_view = df[:] + ser = df["A"] + with tm.assert_cow_warning(warn_copy_on_write): + df.eval("A = B + C", inplace=True) + expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]}) + tm.assert_frame_equal(df, expected) + if not using_copy_on_write: + tm.assert_series_equal(ser, expected["A"]) + tm.assert_series_equal(result_view["A"], expected["A"]) + else: + expected = Series([1, 2, 3], name="A") + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(result_view["A"], expected) + + +class TestValidate: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + pd.eval("2+2", inplace=value) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01b49b5e5b63323b065ec11fc34f6c247a7b0350 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__init__.py @@ -0,0 +1,15 @@ +import numpy as 
np + +import pandas as pd + +object_pyarrow_numpy = ("object", "string[pyarrow_numpy]") + + +def _convert_na_value(ser, expected): + if ser.dtype != object: + if ser.dtype.storage == "pyarrow_numpy": + expected = expected.fillna(np.nan) + else: + # GH#18463 + expected = expected.fillna(pd.NA) + return expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..602d1266a27c5cd0c74ba3da4c1fa21b5fbe75eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f88434aab7abac54230df730432375bb168c97c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b0ccc8bdfa759663b171039111fd1351f8be97c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..804a847d8ea8d7223817eb38fbe346afc38a19fe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a765d2a4f755d898cdc6492ddca6adf7338b444b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc65a4e3b699ec553fe786eb2e5d092f0d1759ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a82c7c31f0892dd7e1f751fca712d6630cff8367 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..036e4de20ba538bc4dbe6636fc802fb9c8f10e5d --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/conftest.py @@ -0,0 +1,132 @@ +import pytest + +from pandas import Series +from pandas.core.strings.accessor import StringMethods + +_any_string_method = [ + ("cat", (), {"sep": ","}), + ("cat", (Series(list("zyx")),), {"sep": ",", "join": "left"}), + ("center", (10,), {}), + ("contains", ("a",), {}), + ("count", ("a",), {}), + ("decode", ("UTF-8",), {}), + ("encode", ("UTF-8",), {}), + ("endswith", ("a",), {}), + ("endswith", ((),), {}), + ("endswith", (("a",),), {}), + ("endswith", (("a", "b"),), {}), + ("endswith", (("a", "MISSING"),), {}), + ("endswith", ("a",), {"na": True}), + ("endswith", ("a",), {"na": False}), + ("extract", ("([a-z]*)",), {"expand": False}), + ("extract", ("([a-z]*)",), {"expand": True}), + ("extractall", ("([a-z]*)",), {}), + ("find", ("a",), {}), + ("findall", ("a",), {}), + ("get", (0,), {}), + # because "index" (and "rindex") fail intentionally + # if the string is not found, search only for empty string + ("index", ("",), {}), + ("join", (",",), {}), + ("ljust", (10,), {}), + ("match", ("a",), {}), + ("fullmatch", ("a",), {}), + ("normalize", ("NFC",), {}), + ("pad", (10,), {}), + ("partition", (" ",), {"expand": False}), + ("partition", (" ",), {"expand": True}), + ("repeat", (3,), {}), + ("replace", ("a", "z"), {}), + ("rfind", ("a",), {}), + ("rindex", ("",), {}), + ("rjust", (10,), {}), + ("rpartition", (" ",), {"expand": False}), + ("rpartition", (" ",), {"expand": True}), + ("slice", (0, 1), {}), + ("slice_replace", (0, 1, "z"), {}), + ("split", (" ",), {"expand": False}), + ("split", (" ",), {"expand": True}), + ("startswith", ("a",), {}), + ("startswith", (("a",),), {}), + ("startswith", (("a", "b"),), {}), + ("startswith", (("a", "MISSING"),), {}), + ("startswith", ((),), {}), + ("startswith", ("a",), {"na": True}), + ("startswith", ("a",), {"na": False}), + ("removeprefix", ("a",), {}), + ("removesuffix", ("a",), {}), + # translating unicode points of "a" to "d" 
+ ("translate", ({97: 100},), {}), + ("wrap", (2,), {}), + ("zfill", (10,), {}), +] + list( + zip( + [ + # methods without positional arguments: zip with empty tuple and empty dict + "capitalize", + "cat", + "get_dummies", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "len", + "lower", + "lstrip", + "partition", + "rpartition", + "rsplit", + "rstrip", + "slice", + "slice_replace", + "split", + "strip", + "swapcase", + "title", + "upper", + "casefold", + ], + [()] * 100, + [{}] * 100, + ) +) +ids, _, _ = zip(*_any_string_method) # use method name as fixture-id +missing_methods = {f for f in dir(StringMethods) if not f.startswith("_")} - set(ids) + +# test that the above list captures all methods of StringMethods +assert not missing_methods + + +@pytest.fixture(params=_any_string_method, ids=ids) +def any_string_method(request): + """ + Fixture for all public methods of `StringMethods` + + This fixture returns a tuple of the method name and sample arguments + necessary to call the method. + + Returns + ------- + method_name : str + The name of the method in `StringMethods` + args : tuple + Sample values for the positional arguments + kwargs : dict + Sample values for the keyword arguments + + Examples + -------- + >>> def test_something(any_string_method): + ... s = Series(['a', 'b', np.nan, 'd']) + ... + ... method_name, args, kwargs = any_string_method + ... method = getattr(s.str, method_name) + ... # will not raise + ... 
method(*args, **kwargs) + """ + return request.param diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_api.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..31e005466af7b935c446e01f90ed87bb4736b84b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_api.py @@ -0,0 +1,198 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, + option_context, +) +from pandas.core.strings.accessor import StringMethods + +# subset of the full set from pandas/conftest.py +_any_allowed_skipna_inferred_dtype = [ + ("string", ["a", np.nan, "c"]), + ("bytes", [b"a", np.nan, b"c"]), + ("empty", [np.nan, np.nan, np.nan]), + ("empty", []), + ("mixed-integer", ["a", np.nan, 2]), +] +ids, _ = zip(*_any_allowed_skipna_inferred_dtype) # use inferred type as id + + +@pytest.fixture(params=_any_allowed_skipna_inferred_dtype, ids=ids) +def any_allowed_skipna_inferred_dtype(request): + """ + Fixture for all (inferred) dtypes allowed in StringMethods.__init__ + + The covered (inferred) types are: + * 'string' + * 'empty' + * 'bytes' + * 'mixed' + * 'mixed-integer' + + Returns + ------- + inferred_dtype : str + The string for the inferred dtype from _libs.lib.infer_dtype + values : np.ndarray + An array of object dtype that will be inferred to have + `inferred_dtype` + + Examples + -------- + >>> from pandas._libs import lib + >>> + >>> def test_something(any_allowed_skipna_inferred_dtype): + ... inferred_dtype, values = any_allowed_skipna_inferred_dtype + ... # will pass + ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype + ... + ... # constructor for .str-accessor will also pass + ... 
Series(values).str + """ + inferred_dtype, values = request.param + values = np.array(values, dtype=object) # object dtype to avoid casting + + # correctness of inference tested in tests/dtypes/test_inference.py + return inferred_dtype, values + + +def test_api(any_string_dtype): + # GH 6106, GH 9322 + assert Series.str is StringMethods + assert isinstance(Series([""], dtype=any_string_dtype).str, StringMethods) + + +def test_api_mi_raises(): + # GH 23679 + mi = MultiIndex.from_arrays([["a", "b", "c"]]) + msg = "Can only use .str accessor with Index, not MultiIndex" + with pytest.raises(AttributeError, match=msg): + mi.str + assert not hasattr(mi, "str") + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_api_per_dtype(index_or_series, dtype, any_skipna_inferred_dtype): + # one instance of parametrized fixture + box = index_or_series + inferred_dtype, values = any_skipna_inferred_dtype + + t = box(values, dtype=dtype) # explicit dtype to avoid casting + + types_passing_constructor = [ + "string", + "unicode", + "empty", + "bytes", + "mixed", + "mixed-integer", + ] + if inferred_dtype in types_passing_constructor: + # GH 6106 + assert isinstance(t.str, StringMethods) + else: + # GH 9184, GH 23011, GH 23163 + msg = "Can only use .str accessor with string values.*" + with pytest.raises(AttributeError, match=msg): + t.str + assert not hasattr(t, "str") + + +@pytest.mark.parametrize("dtype", [object, "category"]) +def test_api_per_method( + index_or_series, + dtype, + any_allowed_skipna_inferred_dtype, + any_string_method, + request, +): + # this test does not check correctness of the different methods, + # just that the methods work on the specified (inferred) dtypes, + # and raise on all others + box = index_or_series + + # one instance of each parametrized fixture + inferred_dtype, values = any_allowed_skipna_inferred_dtype + method_name, args, kwargs = any_string_method + + reason = None + if box is Index and values.size == 0: + if method_name in 
["partition", "rpartition"] and kwargs.get("expand", True): + raises = TypeError + reason = "Method cannot deal with empty Index" + elif method_name == "split" and kwargs.get("expand", None): + raises = TypeError + reason = "Split fails on empty Series when expand=True" + elif method_name == "get_dummies": + raises = ValueError + reason = "Need to fortify get_dummies corner cases" + + elif ( + box is Index + and inferred_dtype == "empty" + and dtype == object + and method_name == "get_dummies" + ): + raises = ValueError + reason = "Need to fortify get_dummies corner cases" + + if reason is not None: + mark = pytest.mark.xfail(raises=raises, reason=reason) + request.applymarker(mark) + + t = box(values, dtype=dtype) # explicit dtype to avoid casting + method = getattr(t.str, method_name) + + bytes_allowed = method_name in ["decode", "get", "len", "slice"] + # as of v0.23.4, all methods except 'cat' are very lenient with the + # allowed data types, just returning NaN for entries that error. + # This could be changed with an 'errors'-kwarg to the `str`-accessor, + # see discussion in GH 13877 + mixed_allowed = method_name not in ["cat"] + + allowed_types = ( + ["string", "unicode", "empty"] + + ["bytes"] * bytes_allowed + + ["mixed", "mixed-integer"] * mixed_allowed + ) + + if inferred_dtype in allowed_types: + # xref GH 23555, GH 23556 + with option_context("future.no_silent_downcasting", True): + method(*args, **kwargs) # works! + else: + # GH 23011, GH 23163 + msg = ( + f"Cannot use .str.{method_name} with values of " + f"inferred dtype {repr(inferred_dtype)}." 
+ ) + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + + +def test_api_for_categorical(any_string_method, any_string_dtype): + # https://github.com/pandas-dev/pandas/issues/10661 + s = Series(list("aabb"), dtype=any_string_dtype) + s = s + " " + s + c = s.astype("category") + c = c.astype(CategoricalDtype(c.dtype.categories.astype("object"))) + assert isinstance(c.str, StringMethods) + + method_name, args, kwargs = any_string_method + + result = getattr(c.str, method_name)(*args, **kwargs) + expected = getattr(s.astype("object").str, method_name)(*args, **kwargs) + + if isinstance(result, DataFrame): + tm.assert_frame_equal(result, expected) + elif isinstance(result, Series): + tm.assert_series_equal(result, expected) + else: + # str.cat(others=None) returns string, for example + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_case_justify.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_case_justify.py new file mode 100644 index 0000000000000000000000000000000000000000..41aedae90ca7656a3df7c4f09beee79b2f533741 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_case_justify.py @@ -0,0 +1,427 @@ +from datetime import datetime +import operator + +import numpy as np +import pytest + +from pandas import ( + Series, + _testing as tm, +) + + +def test_title(any_string_dtype): + s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) + result = s.str.title() + expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_title_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) + result = s.str.title() + expected = Series( + ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_almost_equal(result, expected) + + +def 
test_lower_upper(any_string_dtype): + s = Series(["om", np.nan, "nom", "nom"], dtype=any_string_dtype) + + result = s.str.upper() + expected = Series(["OM", np.nan, "NOM", "NOM"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + result = result.str.lower() + tm.assert_series_equal(result, s) + + +def test_lower_upper_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0]) + + result = s.str.upper() + expected = Series( + ["A", np.nan, "B", np.nan, np.nan, "FOO", None, np.nan, np.nan], dtype=object + ) + tm.assert_series_equal(result, expected) + + result = s.str.lower() + expected = Series( + ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data, expected", + [ + ( + ["FOO", "BAR", np.nan, "Blah", "blurg"], + ["Foo", "Bar", np.nan, "Blah", "Blurg"], + ), + (["a", "b", "c"], ["A", "B", "C"]), + (["a b", "a bc. de"], ["A b", "A bc. 
de"]), + ], +) +def test_capitalize(data, expected, any_string_dtype): + s = Series(data, dtype=any_string_dtype) + result = s.str.capitalize() + expected = Series(expected, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_capitalize_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) + result = s.str.capitalize() + expected = Series( + ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_swapcase(any_string_dtype): + s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) + result = s.str.swapcase() + expected = Series(["foo", "bar", np.nan, "bLAH", "BLURG"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_swapcase_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0]) + result = s.str.swapcase() + expected = Series( + ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_casefold(): + # GH25405 + expected = Series(["ss", np.nan, "case", "ssd"]) + s = Series(["ß", np.nan, "case", "ßd"]) + result = s.str.casefold() + + tm.assert_series_equal(result, expected) + + +def test_casemethods(any_string_dtype): + values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"] + s = Series(values, dtype=any_string_dtype) + assert s.str.lower().tolist() == [v.lower() for v in values] + assert s.str.upper().tolist() == [v.upper() for v in values] + assert s.str.title().tolist() == [v.title() for v in values] + assert s.str.capitalize().tolist() == [v.capitalize() for v in values] + assert s.str.swapcase().tolist() == [v.swapcase() for v in values] + + +def test_pad(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) + + result = s.str.pad(5, side="left") + expected = Series( + [" a", " b", 
np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="right") + expected = Series( + ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="both") + expected = Series( + [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + +def test_pad_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0]) + + result = s.str.pad(5, side="left") + expected = Series( + [" a", np.nan, " b", np.nan, np.nan, " ee", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="right") + expected = Series( + ["a ", np.nan, "b ", np.nan, np.nan, "ee ", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="both") + expected = Series( + [" a ", np.nan, " b ", np.nan, np.nan, " ee ", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_pad_fillchar(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) + + result = s.str.pad(5, side="left", fillchar="X") + expected = Series( + ["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="right", fillchar="X") + expected = Series( + ["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.pad(5, side="both", fillchar="X") + expected = Series( + ["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + +def test_pad_fillchar_bad_arg_raises(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, 
"eeeeee"], dtype=any_string_dtype) + + msg = "fillchar must be a character, not str" + with pytest.raises(TypeError, match=msg): + s.str.pad(5, fillchar="XY") + + msg = "fillchar must be a character, not int" + with pytest.raises(TypeError, match=msg): + s.str.pad(5, fillchar=5) + + +@pytest.mark.parametrize("method_name", ["center", "ljust", "rjust", "zfill", "pad"]) +def test_pad_width_bad_arg_raises(method_name, any_string_dtype): + # see gh-13598 + s = Series(["1", "22", "a", "bb"], dtype=any_string_dtype) + op = operator.methodcaller(method_name, "f") + + msg = "width must be of integer type, not str" + with pytest.raises(TypeError, match=msg): + op(s.str) + + +def test_center_ljust_rjust(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) + + result = s.str.center(5) + expected = Series( + [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.ljust(5) + expected = Series( + ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = s.str.rjust(5) + expected = Series( + [" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + +def test_center_ljust_rjust_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0]) + + result = s.str.center(5) + expected = Series( + [ + " a ", + np.nan, + " b ", + np.nan, + np.nan, + " c ", + " eee ", + None, + np.nan, + np.nan, + ], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + result = s.str.ljust(5) + expected = Series( + [ + "a ", + np.nan, + "b ", + np.nan, + np.nan, + "c ", + "eee ", + None, + np.nan, + np.nan, + ], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + result = s.str.rjust(5) + expected = Series( + [ + " a", + np.nan, + " b", + np.nan, + np.nan, + " c", + " eee", + None, + 
np.nan, + np.nan, + ], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_center_ljust_rjust_fillchar(any_string_dtype): + if any_string_dtype == "string[pyarrow_numpy]": + pytest.skip( + "Arrow logic is different, " + "see https://github.com/pandas-dev/pandas/pull/54533/files#r1299808126", + ) + s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype) + + result = s.str.center(5, fillchar="X") + expected = Series( + ["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + expected = np.array([v.center(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) + + result = s.str.ljust(5, fillchar="X") + expected = Series( + ["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + expected = np.array([v.ljust(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) + + result = s.str.rjust(5, fillchar="X") + expected = Series( + ["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + expected = np.array([v.rjust(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) + + +def test_center_ljust_rjust_fillchar_bad_arg_raises(any_string_dtype): + s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype) + + # If fillchar is not a character, normal str raises TypeError + # 'aaa'.ljust(5, 'XY') + # TypeError: must be char, not str + template = "fillchar must be a character, not {dtype}" + + with pytest.raises(TypeError, match=template.format(dtype="str")): + s.str.center(5, fillchar="XY") + + with pytest.raises(TypeError, match=template.format(dtype="str")): + s.str.ljust(5, fillchar="XY") + + with pytest.raises(TypeError, 
match=template.format(dtype="str")): + s.str.rjust(5, fillchar="XY") + + with pytest.raises(TypeError, match=template.format(dtype="int")): + s.str.center(5, fillchar=1) + + with pytest.raises(TypeError, match=template.format(dtype="int")): + s.str.ljust(5, fillchar=1) + + with pytest.raises(TypeError, match=template.format(dtype="int")): + s.str.rjust(5, fillchar=1) + + +def test_zfill(any_string_dtype): + s = Series(["1", "22", "aaa", "333", "45678"], dtype=any_string_dtype) + + result = s.str.zfill(5) + expected = Series( + ["00001", "00022", "00aaa", "00333", "45678"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + expected = np.array([v.zfill(5) for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) + + result = s.str.zfill(3) + expected = Series(["001", "022", "aaa", "333", "45678"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.zfill(3) for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) + + s = Series(["1", np.nan, "aaa", np.nan, "45678"], dtype=any_string_dtype) + result = s.str.zfill(5) + expected = Series( + ["00001", np.nan, "00aaa", np.nan, "45678"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + +def test_wrap(any_string_dtype): + # test values are: two words less than width, two words equal to width, + # two words greater than width, one word less than width, one word + # equal to width, one word greater than width, multiple tokens with + # trailing whitespace equal to width + s = Series( + [ + "hello world", + "hello world!", + "hello world!!", + "abcdefabcde", + "abcdefabcdef", + "abcdefabcdefa", + "ab ab ab ab ", + "ab ab ab ab a", + "\t", + ], + dtype=any_string_dtype, + ) + + # expected values + expected = Series( + [ + "hello world", + "hello world!", + "hello\nworld!!", + "abcdefabcde", + "abcdefabcdef", + 
"abcdefabcdef\na", + "ab ab ab ab", + "ab ab ab ab\na", + "", + ], + dtype=any_string_dtype, + ) + + result = s.str.wrap(12, break_long_words=True) + tm.assert_series_equal(result, expected) + + +def test_wrap_unicode(any_string_dtype): + # test with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode + s = Series( + [" pre ", np.nan, "\xac\u20ac\U00008000 abadcafe"], dtype=any_string_dtype + ) + expected = Series( + [" pre", np.nan, "\xac\u20ac\U00008000 ab\nadcafe"], dtype=any_string_dtype + ) + result = s.str.wrap(6) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_cat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..c1e7ad6e02779259f37bd6bad57f03428d9c3055 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_cat.py @@ -0,0 +1,427 @@ +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, + concat, + option_context, +) + + +@pytest.mark.parametrize("other", [None, Series, Index]) +def test_str_cat_name(index_or_series, other): + # GH 21053 + box = index_or_series + values = ["a", "b"] + if other: + other = other(values) + else: + other = values + result = box(values, name="name").str.cat(other, sep=",") + assert result.name == "name" + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +def test_str_cat(index_or_series, infer_string): + with option_context("future.infer_string", infer_string): + box = index_or_series + # test_cat above tests "str_cat" from ndarray; + # here testing "str.cat" from Series/Index to ndarray/list + s = box(["a", "a", "b", "b", "c", np.nan]) + + # single array + result = s.str.cat() + expected = "aabbc" + assert result == expected + + 
result = s.str.cat(na_rep="-") + expected = "aabbc-" + assert result == expected + + result = s.str.cat(sep="_", na_rep="NA") + expected = "a_a_b_b_c_NA" + assert result == expected + + t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object) + expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"]) + + # Series/Index with array + result = s.str.cat(t, na_rep="-") + tm.assert_equal(result, expected) + + # Series/Index with list + result = s.str.cat(list(t), na_rep="-") + tm.assert_equal(result, expected) + + # errors for incorrect lengths + rgx = r"If `others` contains arrays or lists \(or other list-likes.*" + z = Series(["1", "2", "3"]) + + with pytest.raises(ValueError, match=rgx): + s.str.cat(z.values) + + with pytest.raises(ValueError, match=rgx): + s.str.cat(list(z)) + + +def test_str_cat_raises_intuitive_error(index_or_series): + # GH 11334 + box = index_or_series + s = box(["a", "b", "c", "d"]) + message = "Did you mean to supply a `sep` keyword?" + with pytest.raises(ValueError, match=message): + s.str.cat("|") + with pytest.raises(ValueError, match=message): + s.str.cat(" ") + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize("sep", ["", None]) +@pytest.mark.parametrize("dtype_target", ["object", "category"]) +@pytest.mark.parametrize("dtype_caller", ["object", "category"]) +def test_str_cat_categorical( + index_or_series, dtype_caller, dtype_target, sep, infer_string +): + box = index_or_series + + with option_context("future.infer_string", infer_string): + s = Index(["a", "a", "b", "a"], dtype=dtype_caller) + s = s if box == Index else Series(s, index=s, dtype=s.dtype) + t = Index(["b", "a", "b", "c"], dtype=dtype_target) + + expected = Index( + ["ab", "aa", "bb", "ac"], dtype=object if dtype_caller == "object" else None + ) + expected = ( + expected + if box == Index + else Series( + expected, index=Index(s, dtype=dtype_caller), dtype=expected.dtype + ) + ) + 
+ # Series/Index with unaligned Index -> t.values + result = s.str.cat(t.values, sep=sep) + tm.assert_equal(result, expected) + + # Series/Index with Series having matching Index + t = Series(t.values, index=Index(s, dtype=dtype_caller)) + result = s.str.cat(t, sep=sep) + tm.assert_equal(result, expected) + + # Series/Index with Series.values + result = s.str.cat(t.values, sep=sep) + tm.assert_equal(result, expected) + + # Series/Index with Series having different Index + t = Series(t.values, index=t.values) + expected = Index( + ["aa", "aa", "bb", "bb", "aa"], + dtype=object if dtype_caller == "object" else None, + ) + dtype = object if dtype_caller == "object" else s.dtype.categories.dtype + expected = ( + expected + if box == Index + else Series( + expected, + index=Index(expected.str[:1], dtype=dtype), + dtype=expected.dtype, + ) + ) + + result = s.str.cat(t, sep=sep) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [[1, 2, 3], [0.1, 0.2, 0.3], [1, 2, "b"]], + ids=["integers", "floats", "mixed"], +) +# without dtype=object, np.array would cast [1, 2, 'b'] to ['1', '2', 'b'] +@pytest.mark.parametrize( + "box", + [Series, Index, list, lambda x: np.array(x, dtype=object)], + ids=["Series", "Index", "list", "np.array"], +) +def test_str_cat_wrong_dtype_raises(box, data): + # GH 22722 + s = Series(["a", "b", "c"]) + t = box(data) + + msg = "Concatenation requires list-likes containing only strings.*" + with pytest.raises(TypeError, match=msg): + # need to use outer and na_rep, as otherwise Index would not raise + s.str.cat(t, join="outer", na_rep="-") + + +def test_str_cat_mixed_inputs(index_or_series): + box = index_or_series + s = Index(["a", "b", "c", "d"]) + s = s if box == Index else Series(s, index=s) + + t = Series(["A", "B", "C", "D"], index=s.values) + d = concat([t, Series(s, index=s)], axis=1) + + expected = Index(["aAa", "bBb", "cCc", "dDd"]) + expected = expected if box == Index else Series(expected.values, index=s.values) 
+ + # Series/Index with DataFrame + result = s.str.cat(d) + tm.assert_equal(result, expected) + + # Series/Index with two-dimensional ndarray + result = s.str.cat(d.values) + tm.assert_equal(result, expected) + + # Series/Index with list of Series + result = s.str.cat([t, s]) + tm.assert_equal(result, expected) + + # Series/Index with mixed list of Series/array + result = s.str.cat([t, s.values]) + tm.assert_equal(result, expected) + + # Series/Index with list of Series; different indexes + t.index = ["b", "c", "d", "a"] + expected = box(["aDa", "bAb", "cBc", "dCd"]) + expected = expected if box == Index else Series(expected.values, index=s.values) + result = s.str.cat([t, s]) + tm.assert_equal(result, expected) + + # Series/Index with mixed list; different index + result = s.str.cat([t, s.values]) + tm.assert_equal(result, expected) + + # Series/Index with DataFrame; different indexes + d.index = ["b", "c", "d", "a"] + expected = box(["aDd", "bAa", "cBb", "dCc"]) + expected = expected if box == Index else Series(expected.values, index=s.values) + result = s.str.cat(d) + tm.assert_equal(result, expected) + + # errors for incorrect lengths + rgx = r"If `others` contains arrays or lists \(or other list-likes.*" + z = Series(["1", "2", "3"]) + e = concat([z, z], axis=1) + + # two-dimensional ndarray + with pytest.raises(ValueError, match=rgx): + s.str.cat(e.values) + + # list of list-likes + with pytest.raises(ValueError, match=rgx): + s.str.cat([z.values, s.values]) + + # mixed list of Series/list-like + with pytest.raises(ValueError, match=rgx): + s.str.cat([z.values, s]) + + # errors for incorrect arguments in list-like + rgx = "others must be Series, Index, DataFrame,.*" + # make sure None/NaN do not crash checks in _get_series_list + u = Series(["a", np.nan, "c", None]) + + # mix of string and Series + with pytest.raises(TypeError, match=rgx): + s.str.cat([u, "u"]) + + # DataFrame in list + with pytest.raises(TypeError, match=rgx): + s.str.cat([u, d]) + + # 2-dim 
ndarray in list + with pytest.raises(TypeError, match=rgx): + s.str.cat([u, d.values]) + + # nested lists + with pytest.raises(TypeError, match=rgx): + s.str.cat([u, [u, d]]) + + # forbidden input type: set + # GH 23009 + with pytest.raises(TypeError, match=rgx): + s.str.cat(set(u)) + + # forbidden input type: set in list + # GH 23009 + with pytest.raises(TypeError, match=rgx): + s.str.cat([u, set(u)]) + + # other forbidden input type, e.g. int + with pytest.raises(TypeError, match=rgx): + s.str.cat(1) + + # nested list-likes + with pytest.raises(TypeError, match=rgx): + s.str.cat(iter([t.values, list(s)])) + + +@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"]) +def test_str_cat_align_indexed(index_or_series, join): + # https://github.com/pandas-dev/pandas/issues/18657 + box = index_or_series + + s = Series(["a", "b", "c", "d"], index=["a", "b", "c", "d"]) + t = Series(["D", "A", "E", "B"], index=["d", "a", "e", "b"]) + sa, ta = s.align(t, join=join) + # result after manual alignment of inputs + expected = sa.str.cat(ta, na_rep="-") + + if box == Index: + s = Index(s) + sa = Index(sa) + expected = Index(expected) + + result = s.str.cat(t, join=join, na_rep="-") + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"]) +def test_str_cat_align_mixed_inputs(join): + s = Series(["a", "b", "c", "d"]) + t = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1]) + d = concat([t, t], axis=1) + + expected_outer = Series(["aaa", "bbb", "c--", "ddd", "-ee"]) + expected = expected_outer.loc[s.index.join(t.index, how=join)] + + # list of Series + result = s.str.cat([t, t], join=join, na_rep="-") + tm.assert_series_equal(result, expected) + + # DataFrame + result = s.str.cat(d, join=join, na_rep="-") + tm.assert_series_equal(result, expected) + + # mixed list of indexed/unindexed + u = np.array(["A", "B", "C", "D"]) + expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"]) + # joint index of rhs [t, u]; u 
will be forced have index of s + rhs_idx = ( + t.index.intersection(s.index) + if join == "inner" + else t.index.union(s.index) + if join == "outer" + else t.index.append(s.index.difference(t.index)) + ) + + expected = expected_outer.loc[s.index.join(rhs_idx, how=join)] + result = s.str.cat([t, u], join=join, na_rep="-") + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError, match="others must be Series,.*"): + # nested lists are forbidden + s.str.cat([t, list(u)], join=join) + + # errors for incorrect lengths + rgx = r"If `others` contains arrays or lists \(or other list-likes.*" + z = Series(["1", "2", "3"]).values + + # unindexed object of wrong length + with pytest.raises(ValueError, match=rgx): + s.str.cat(z, join=join) + + # unindexed object of wrong length in list + with pytest.raises(ValueError, match=rgx): + s.str.cat([t, z], join=join) + + +def test_str_cat_all_na(index_or_series, index_or_series2): + # GH 24044 + box = index_or_series + other = index_or_series2 + + # check that all NaNs in caller / target work + s = Index(["a", "b", "c", "d"]) + s = s if box == Index else Series(s, index=s) + t = other([np.nan] * 4, dtype=object) + # add index of s for alignment + t = t if other == Index else Series(t, index=s) + + # all-NA target + if box == Series: + expected = Series([np.nan] * 4, index=s.index, dtype=s.dtype) + else: # box == Index + # TODO: Strimg option, this should return string dtype + expected = Index([np.nan] * 4, dtype=object) + result = s.str.cat(t, join="left") + tm.assert_equal(result, expected) + + # all-NA caller (only for Series) + if other == Series: + expected = Series([np.nan] * 4, dtype=object, index=t.index) + result = t.str.cat(s, join="left") + tm.assert_series_equal(result, expected) + + +def test_str_cat_special_cases(): + s = Series(["a", "b", "c", "d"]) + t = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1]) + + # iterator of elements with different types + expected = Series(["aaa", "bbb", "c-c", "ddd", 
"-e-"]) + result = s.str.cat(iter([t, s.values]), join="outer", na_rep="-") + tm.assert_series_equal(result, expected) + + # right-align with different indexes in others + expected = Series(["aa-", "d-d"], index=[0, 3]) + result = s.str.cat([t.loc[[0]], t.loc[[3]]], join="right", na_rep="-") + tm.assert_series_equal(result, expected) + + +def test_cat_on_filtered_index(): + df = DataFrame( + index=MultiIndex.from_product( + [[2011, 2012], [1, 2, 3]], names=["year", "month"] + ) + ) + + df = df.reset_index() + df = df[df.month > 1] + + str_year = df.year.astype("str") + str_month = df.month.astype("str") + str_both = str_year.str.cat(str_month, sep=" ") + + assert str_both.loc[1] == "2011 2" + + str_multiple = str_year.str.cat([str_month, str_month], sep=" ") + + assert str_multiple.loc[1] == "2011 2 2" + + +@pytest.mark.parametrize("klass", [tuple, list, np.array, Series, Index]) +def test_cat_different_classes(klass): + # https://github.com/pandas-dev/pandas/issues/33425 + s = Series(["a", "b", "c"]) + result = s.str.cat(klass(["x", "y", "z"])) + expected = Series(["ax", "by", "cz"]) + tm.assert_series_equal(result, expected) + + +def test_cat_on_series_dot_str(): + # GH 28277 + ps = Series(["AbC", "de", "FGHI", "j", "kLLLm"]) + + message = re.escape( + "others must be Series, Index, DataFrame, np.ndarray " + "or list-like (either containing only strings or " + "containing only objects of type Series/Index/" + "np.ndarray[1-dim])" + ) + with pytest.raises(TypeError, match=message): + ps.str.cat(others=ps.str) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_extract.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..77d008c650264889550ec70331a1b98064242d26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_extract.py @@ -0,0 +1,724 @@ +from datetime import datetime +import re + +import numpy as np 
+import pytest + +from pandas.core.dtypes.dtypes import ArrowDtype + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, +) + + +def test_extract_expand_kwarg_wrong_type_raises(any_string_dtype): + # TODO: should this raise TypeError + values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + with pytest.raises(ValueError, match="expand must be True or False"): + values.str.extract(".*(BAD[_]+).*(BAD)", expand=None) + + +def test_extract_expand_kwarg(any_string_dtype): + s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype) + + result = s.str.extract(".*(BAD[_]+).*") + tm.assert_frame_equal(result, expected) + + result = s.str.extract(".*(BAD[_]+).*", expand=True) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False) + tm.assert_frame_equal(result, expected) + + +def test_extract_expand_False_mixed_object(): + ser = Series( + ["aBAD_BAD", np.nan, "BAD_b_BAD", True, datetime.today(), "foo", None, 1, 2.0] + ) + + # two groups + result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False) + er = [np.nan, np.nan] # empty row + expected = DataFrame( + [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object + ) + tm.assert_frame_equal(result, expected) + + # single group + result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False) + expected = Series( + ["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_extract_expand_index_raises(): + # GH9980 + # Index only works with one regex group since + # multi-group would expand to a frame + idx = Index(["A1", "A2", "A3", "A4", "B5"]) + msg = "only one regex group is supported with Index" + with 
pytest.raises(ValueError, match=msg): + idx.str.extract("([AB])([123])", expand=False) + + +def test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype): + s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype) + msg = "pattern contains no capture groups" + + # no groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("[ABC][123]", expand=False) + + # only non-capturing groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("(?:[AB]).*", expand=False) + + +def test_extract_expand_single_capture_group(index_or_series, any_string_dtype): + # single group renames series/index properly + s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype) + result = s_or_idx.str.extract(r"(?PA)\d", expand=False) + + expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype) + if index_or_series == Series: + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +def test_extract_expand_capture_groups(any_string_dtype): + s = Series(["A1", "B2", "C3"], dtype=any_string_dtype) + # one group, no matches + result = s.str.extract("(_)", expand=False) + expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + # two groups, no matches + result = s.str.extract("(_)(_)", expand=False) + expected = DataFrame( + [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one group, some matches + result = s.str.extract("([AB])[123]", expand=False) + expected = Series(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + # two groups, some matches + result = s.str.extract("([AB])([123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one named group + result = 
s.str.extract("(?P[AB])", expand=False) + expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + # two named groups + result = s.str.extract("(?P[AB])(?P[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # mix named and unnamed groups + result = s.str.extract("([AB])(?P[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=[0, "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # one normal group, one non-capturing group + result = s.str.extract("([AB])(?:[123])", expand=False) + expected = Series(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + # two normal groups, one non-capturing group + s = Series(["A11", "B22", "C33"], dtype=any_string_dtype) + result = s.str.extract("([AB])([123])(?:[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one optional group followed by one normal group + s = Series(["A1", "B2", "3"], dtype=any_string_dtype) + result = s.str.extract("(?P[AB])?(?P[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, "3"]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # one normal group followed by one optional group + s = Series(["A1", "B2", "C"], dtype=any_string_dtype) + result = s.str.extract("(?P[ABC])(?P[123])?", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_expand_capture_groups_index(index, any_string_dtype): + # 
https://github.com/pandas-dev/pandas/issues/6348 + # not passing index to the extractor + data = ["A1", "B2", "C"] + + if len(index) == 0: + pytest.skip("Test requires len(index) > 0") + while len(index) < len(data): + index = index.repeat(2) + + index = index[: len(data)] + ser = Series(data, index=index, dtype=any_string_dtype) + + result = ser.str.extract(r"(\d)", expand=False) + expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + result = ser.str.extract(r"(?P\D)(?P\d)?", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + index=index, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_single_series_name_is_preserved(any_string_dtype): + s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype) + result = s.str.extract(r"(?P[a-z])", expand=False) + expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_extract_expand_True(any_string_dtype): + # Contains tests like those in test_match and some others. 
+ s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + + result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True) + expected = DataFrame( + [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_expand_True_mixed_object(): + er = [np.nan, np.nan] # empty row + mixed = Series( + [ + "aBAD_BAD", + np.nan, + "BAD_b_BAD", + True, + datetime.today(), + "foo", + None, + 1, + 2.0, + ] + ) + + result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True) + expected = DataFrame( + [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_expand_True_single_capture_group_raises( + index_or_series, any_string_dtype +): + # these should work for both Series and Index + # no groups + s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype) + msg = "pattern contains no capture groups" + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("[ABC][123]", expand=True) + + # only non-capturing groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("(?:[AB]).*", expand=True) + + +def test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype): + # single group renames series/index properly + s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype) + result = s_or_idx.str.extract(r"(?PA)\d", expand=True) + expected = DataFrame({"uno": ["A", "A"]}, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("name", [None, "series_name"]) +def test_extract_series(name, any_string_dtype): + # extract should give the same result whether or not the series has a name. 
+ s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype) + + # one group, no matches + result = s.str.extract("(_)", expand=True) + expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two groups, no matches + result = s.str.extract("(_)(_)", expand=True) + expected = DataFrame( + [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one group, some matches + result = s.str.extract("([AB])[123]", expand=True) + expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two groups, some matches + result = s.str.extract("([AB])([123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one named group + result = s.str.extract("(?P[AB])", expand=True) + expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two named groups + result = s.str.extract("(?P[AB])(?P[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # mix named and unnamed groups + result = s.str.extract("([AB])(?P[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=[0, "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # one normal group, one non-capturing group + result = s.str.extract("([AB])(?:[123])", expand=True) + expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + +def test_extract_optional_groups(any_string_dtype): + # two normal groups, one non-capturing group + s = Series(["A11", "B22", "C33"], dtype=any_string_dtype) + result = 
s.str.extract("([AB])([123])(?:[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one optional group followed by one normal group + s = Series(["A1", "B2", "3"], dtype=any_string_dtype) + result = s.str.extract("(?P[AB])?(?P[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, "3"]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # one normal group followed by one optional group + s = Series(["A1", "B2", "C"], dtype=any_string_dtype) + result = s.str.extract("(?P[ABC])(?P[123])?", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_dataframe_capture_groups_index(index, any_string_dtype): + # GH6348 + # not passing index to the extractor + + data = ["A1", "B2", "C"] + + if len(index) < len(data): + pytest.skip(f"Index needs more than {len(data)} values") + + index = index[: len(data)] + s = Series(data, index=index, dtype=any_string_dtype) + + result = s.str.extract(r"(\d)", expand=True) + expected = DataFrame(["1", "2", np.nan], index=index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + result = s.str.extract(r"(?P\D)(?P\d)?", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + index=index, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extract_single_group_returns_frame(any_string_dtype): + # GH11386 extract should always return DataFrame, even when + # there is only one group. Prior to v0.18.0, extract returned + # Series when there was only one group in the regex. 
+ s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype) + result = s.str.extract(r"(?P[a-z])", expand=True) + expected = DataFrame({"letter": ["a", "b", "c"]}, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + +def test_extractall(any_string_dtype): + data = [ + "dave@google.com", + "tdhock5@gmail.com", + "maudelaperriere@gmail.com", + "rob@gmail.com some text steve@gmail.com", + "a@b.com some text c@d.com and e@f.com", + np.nan, + "", + ] + expected_tuples = [ + ("dave", "google", "com"), + ("tdhock5", "gmail", "com"), + ("maudelaperriere", "gmail", "com"), + ("rob", "gmail", "com"), + ("steve", "gmail", "com"), + ("a", "b", "com"), + ("c", "d", "com"), + ("e", "f", "com"), + ] + pat = r""" + (?P[a-z0-9]+) + @ + (?P[a-z]+) + \. + (?P[a-z]{2,4}) + """ + expected_columns = ["user", "domain", "tld"] + s = Series(data, dtype=any_string_dtype) + # extractall should return a DataFrame with one row for each match, indexed by the + # subject from which the match came. 
+ expected_index = MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)], + names=(None, "match"), + ) + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype + ) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) + + # The index of the input Series should be used to construct the index of the output + # DataFrame: + mi = MultiIndex.from_tuples( + [ + ("single", "Dave"), + ("single", "Toby"), + ("single", "Maude"), + ("multiple", "robAndSteve"), + ("multiple", "abcdef"), + ("none", "missing"), + ("none", "empty"), + ] + ) + s = Series(data, index=mi, dtype=any_string_dtype) + expected_index = MultiIndex.from_tuples( + [ + ("single", "Dave", 0), + ("single", "Toby", 0), + ("single", "Maude", 0), + ("multiple", "robAndSteve", 0), + ("multiple", "robAndSteve", 1), + ("multiple", "abcdef", 0), + ("multiple", "abcdef", 1), + ("multiple", "abcdef", 2), + ], + names=(None, None, "match"), + ) + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype + ) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) + + # MultiIndexed subject with names. + s = Series(data, index=mi, dtype=any_string_dtype) + s.index.names = ("matches", "description") + expected_index.names = ("matches", "description", "match") + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype + ) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "pat,expected_names", + [ + # optional groups. + ("(?P[AB])?(?P[123])", ["letter", "number"]), + # only one of two groups has a name. 
+ ("([AB])?(?P[123])", [0, "number"]), + ], +) +def test_extractall_column_names(pat, expected_names, any_string_dtype): + s = Series(["", "A1", "32"], dtype=any_string_dtype) + + result = s.str.extractall(pat) + expected = DataFrame( + [("A", "1"), (np.nan, "3"), (np.nan, "2")], + index=MultiIndex.from_tuples([(1, 0), (2, 0), (2, 1)], names=(None, "match")), + columns=expected_names, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extractall_single_group(any_string_dtype): + s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype) + expected_index = MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match") + ) + + # extractall(one named group) returns DataFrame with one named column. + result = s.str.extractall(r"(?P[a-z])") + expected = DataFrame( + {"letter": ["a", "b", "d", "c"]}, index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # extractall(one un-named group) returns DataFrame with one un-named column. + result = s.str.extractall(r"([a-z])") + expected = DataFrame( + ["a", "b", "d", "c"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + +def test_extractall_single_group_with_quantifier(any_string_dtype): + # GH#13382 + # extractall(one un-named group with quantifier) returns DataFrame with one un-named + # column. 
+ s = Series(["ab3", "abc3", "d4cd2"], name="series_name", dtype=any_string_dtype) + result = s.str.extractall(r"([a-z]+)") + expected = DataFrame( + ["ab", "abc", "d", "cd"], + index=MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match") + ), + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data, names", + [ + ([], (None,)), + ([], ("i1",)), + ([], (None, "i2")), + ([], ("i1", "i2")), + (["a3", "b3", "d4c2"], (None,)), + (["a3", "b3", "d4c2"], ("i1", "i2")), + (["a3", "b3", "d4c2"], (None, "i2")), + (["a3", "b3", "d4c2"], ("i1", "i2")), + ], +) +def test_extractall_no_matches(data, names, any_string_dtype): + # GH19075 extractall with no matches should return a valid MultiIndex + n = len(data) + if len(names) == 1: + index = Index(range(n), name=names[0]) + else: + tuples = (tuple([i] * (n - 1)) for i in range(n)) + index = MultiIndex.from_tuples(tuples, names=names) + s = Series(data, name="series_name", index=index, dtype=any_string_dtype) + expected_index = MultiIndex.from_tuples([], names=(names + ("match",))) + + # one un-named group. + result = s.str.extractall("(z)") + expected = DataFrame(columns=[0], index=expected_index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two un-named groups. + result = s.str.extractall("(z)(z)") + expected = DataFrame(columns=[0, 1], index=expected_index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # one named group. + result = s.str.extractall("(?Pz)") + expected = DataFrame( + columns=["first"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # two named groups. + result = s.str.extractall("(?Pz)(?Pz)") + expected = DataFrame( + columns=["first", "second"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one named, one un-named. 
+ result = s.str.extractall("(z)(?Pz)") + expected = DataFrame( + columns=[0, "second"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + +def test_extractall_stringindex(any_string_dtype): + s = Series(["a1a2", "b1", "c1"], name="xxx", dtype=any_string_dtype) + result = s.str.extractall(r"[ab](?P\d)") + expected = DataFrame( + {"digit": ["1", "2", "1"]}, + index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, "match"]), + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # index should return the same result as the default index without name thus + # index.name doesn't affect to the result + if any_string_dtype == "object": + for idx in [ + Index(["a1a2", "b1", "c1"], dtype=object), + Index(["a1a2", "b1", "c1"], name="xxx", dtype=object), + ]: + result = idx.str.extractall(r"[ab](?P\d)") + tm.assert_frame_equal(result, expected) + + s = Series( + ["a1a2", "b1", "c1"], + name="s_name", + index=Index(["XX", "yy", "zz"], name="idx_name"), + dtype=any_string_dtype, + ) + result = s.str.extractall(r"[ab](?P\d)") + expected = DataFrame( + {"digit": ["1", "2", "1"]}, + index=MultiIndex.from_tuples( + [("XX", 0), ("XX", 1), ("yy", 0)], names=["idx_name", "match"] + ), + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_extractall_no_capture_groups_raises(any_string_dtype): + # Does not make sense to use extractall with a regex that has no capture groups. 
# NOTE(review): this span is the tail of pandas/tests/strings/test_extract.py
# followed (at the marked patch boundary below) by the head of
# test_find_replace.py. The diff line-structure was mangled; reconstructed here.
#
# The next three statements are the tail of a test whose "def" line lies above
# this chunk; kept at top level only because the header is not visible here —
# re-indent them under their original def when merging.
# (it returns DataFrame with one column for each capture group)
s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype)
with pytest.raises(ValueError, match="no capture groups"):
    s.str.extractall(r"[a-z]")


def test_extract_index_one_two_groups():
    s = Series(["a3", "b3", "d4c2"], index=["A3", "B3", "D4"], name="series_name")
    r = s.index.str.extract(r"([A-Z])", expand=True)
    e = DataFrame(["A", "B", "D"])
    tm.assert_frame_equal(r, e)

    # Prior to v0.18.0, index.str.extract(regex with one group)
    # returned Index. With more than one group, extract raised an
    # error (GH9980). Now extract always returns DataFrame.
    # NOTE(review): the angle-bracketed group names were stripped during text
    # extraction ("(?P[A-Z])" is invalid regex); restored from the expected
    # column labels below.
    r = s.index.str.extract(r"(?P<letter>[A-Z])(?P<digit>[0-9])", expand=True)
    e_list = [("A", "3"), ("B", "3"), ("D", "4")]
    e = DataFrame(e_list, columns=["letter", "digit"])
    tm.assert_frame_equal(r, e)


def test_extractall_same_as_extract(any_string_dtype):
    # extract(expand=True) on single-match input must equal extractall()'s
    # first-match cross-section, for every named/unnamed group combination.
    s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype)

    pattern_two_noname = r"([a-z])([0-9])"
    extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
    has_multi_index = s.str.extractall(pattern_two_noname)
    no_multi_index = has_multi_index.xs(0, level="match")
    tm.assert_frame_equal(extract_two_noname, no_multi_index)

    # NOTE(review): group names stripped by extraction; "letter"/"number"
    # restored from the upstream pandas test — confirm against history.
    pattern_two_named = r"(?P<letter>[a-z])(?P<number>[0-9])"
    extract_two_named = s.str.extract(pattern_two_named, expand=True)
    has_multi_index = s.str.extractall(pattern_two_named)
    no_multi_index = has_multi_index.xs(0, level="match")
    tm.assert_frame_equal(extract_two_named, no_multi_index)

    pattern_one_named = r"(?P<letter>[a-z])"
    extract_one_named = s.str.extract(pattern_one_named, expand=True)
    has_multi_index = s.str.extractall(pattern_one_named)
    no_multi_index = has_multi_index.xs(0, level="match")
    tm.assert_frame_equal(extract_one_named, no_multi_index)

    pattern_one_noname = r"([a-z])"
    extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
    has_multi_index = s.str.extractall(pattern_one_noname)
    no_multi_index = has_multi_index.xs(0, level="match")
    tm.assert_frame_equal(extract_one_noname, no_multi_index)


def test_extractall_same_as_extract_subject_index(any_string_dtype):
    # same as above tests, but s has an MultiIndex.
    mi = MultiIndex.from_tuples(
        [("A", "first"), ("B", "second"), ("C", "third")],
        names=("capital", "ordinal"),
    )
    s = Series(["a3", "b3", "c2"], index=mi, name="series_name", dtype=any_string_dtype)

    pattern_two_noname = r"([a-z])([0-9])"
    extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
    has_match_index = s.str.extractall(pattern_two_noname)
    no_match_index = has_match_index.xs(0, level="match")
    tm.assert_frame_equal(extract_two_noname, no_match_index)

    # NOTE(review): group names restored (stripped by text extraction).
    pattern_two_named = r"(?P<letter>[a-z])(?P<number>[0-9])"
    extract_two_named = s.str.extract(pattern_two_named, expand=True)
    has_match_index = s.str.extractall(pattern_two_named)
    no_match_index = has_match_index.xs(0, level="match")
    tm.assert_frame_equal(extract_two_named, no_match_index)

    pattern_one_named = r"(?P<letter>[a-z])"
    extract_one_named = s.str.extract(pattern_one_named, expand=True)
    has_match_index = s.str.extractall(pattern_one_named)
    no_match_index = has_match_index.xs(0, level="match")
    tm.assert_frame_equal(extract_one_named, no_match_index)

    pattern_one_noname = r"([a-z])"
    extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
    has_match_index = s.str.extractall(pattern_one_noname)
    no_match_index = has_match_index.xs(0, level="match")
    tm.assert_frame_equal(extract_one_noname, no_match_index)


def test_extractall_preserves_dtype():
    # Ensure that when extractall is called on a series with specific dtypes set, that
    # the dtype is preserved in the resulting DataFrame's column.
    pa = pytest.importorskip("pyarrow")

    result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)")
    assert result.dtypes[0] == "string[pyarrow]"


# ======================================================================
# (patch boundary) new file: pandas/tests/strings/test_find_replace.py
# ======================================================================
from datetime import datetime
import re

import numpy as np
import pytest

from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    Series,
    _testing as tm,
)
from pandas.tests.strings import (
    _convert_na_value,
    object_pyarrow_numpy,
)

# --------------------------------------------------------------------------------------
# str.contains
# --------------------------------------------------------------------------------------


def using_pyarrow(dtype):
    # True for the pyarrow-backed string dtypes, whose str ops may fall back
    # and emit PerformanceWarning.
    return dtype in ("string[pyarrow]", "string[pyarrow_numpy]")


def test_contains(any_string_dtype):
    values = np.array(
        ["foo", np.nan, "fooommm__foo", "mmm_", "foommm[_]+bar"], dtype=np.object_
    )
    values = Series(values, dtype=any_string_dtype)
    pat = "mmm[_]+"

    result = values.str.contains(pat)
    expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series(
        np.array([False, np.nan, True, True, False], dtype=np.object_),
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    result = values.str.contains(pat, regex=False)
    expected = Series(
        np.array([False, np.nan, False, False, True], dtype=np.object_),
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    values = Series(
        np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=object),
        dtype=any_string_dtype,
    )
    result = values.str.contains(pat)
    expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    # case insensitive using regex
    values = Series(
        np.array(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dtype=object),
        dtype=any_string_dtype,
    )

    result = values.str.contains("FOO|mmm", case=False)
    expected = Series(np.array([True, False, True, True]), dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    # case insensitive without regex
    result = values.str.contains("foo", regex=False, case=False)
    expected = Series(np.array([True, False, True, False]), dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    # unicode
    values = Series(
        np.array(["foo", np.nan, "fooommm__foo", "mmm_"], dtype=np.object_),
        dtype=any_string_dtype,
    )
    pat = "mmm[_]+"

    result = values.str.contains(pat)
    expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series(
        np.array([False, np.nan, True, True], dtype=np.object_), dtype=expected_dtype
    )
    tm.assert_series_equal(result, expected)

    result = values.str.contains(pat, na=False)
    expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    values = Series(
        np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=np.object_),
        dtype=any_string_dtype,
    )
    result = values.str.contains(pat)
    expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
    tm.assert_series_equal(result, expected)


def test_contains_object_mixed():
    mixed = Series(
        np.array(
            ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
            dtype=object,
        )
    )
    result = mixed.str.contains("o")
    expected = Series(
        np.array(
            [False, np.nan, False, np.nan, np.nan, True, None, np.nan, np.nan],
            dtype=np.object_,
        )
    )
    tm.assert_series_equal(result, expected)


def test_contains_na_kwarg_for_object_category():
    # gh 22158

    # na for category
    values = Series(["a", "b", "c", "a", np.nan], dtype="category")
    result = values.str.contains("a", na=True)
    expected = Series([True, False, False, True, True])
    tm.assert_series_equal(result, expected)

    result = values.str.contains("a", na=False)
    expected = Series([True, False, False, True, False])
    tm.assert_series_equal(result, expected)

    # na for objects
    values = Series(["a", "b", "c", "a", np.nan])
    result = values.str.contains("a", na=True)
    expected = Series([True, False, False, True, True])
    tm.assert_series_equal(result, expected)

    result = values.str.contains("a", na=False)
    expected = Series([True, False, False, True, False])
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "na, expected",
    [
        (None, pd.NA),
        (True, True),
        (False, False),
        (0, False),
        (3, True),
        (np.nan, pd.NA),
    ],
)
@pytest.mark.parametrize("regex", [True, False])
def test_contains_na_kwarg_for_nullable_string_dtype(
    nullable_string_dtype, na, expected, regex
):
    # https://github.com/pandas-dev/pandas/pull/41025#issuecomment-824062416

    values = Series(["a", "b", "c", "a", np.nan], dtype=nullable_string_dtype)
    result = values.str.contains("a", na=na, regex=regex)
    expected = Series([True, False, False, True, expected], dtype="boolean")
    tm.assert_series_equal(result, expected)


def test_contains_moar(any_string_dtype):
    # PR #1179
    s = Series(
        ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"],
        dtype=any_string_dtype,
    )

    result = s.str.contains("a")
    expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series(
        [False, False, False, True, True, False, np.nan, False, False, True],
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    result = s.str.contains("a", case=False)
    expected = Series(
        [True, False, False, True, True, False, np.nan, True, False, True],
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    result = s.str.contains("Aa")
    expected = Series(
        [False, False, False, True, False, False, np.nan, False, False, False],
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    result = s.str.contains("ba")
    expected = Series(
        [False, False, False, True, False, False, np.nan, False, False, False],
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)

    result = s.str.contains("ba", case=False)
    expected = Series(
        [False, False, False, True, True, False, np.nan, True, False, False],
        dtype=expected_dtype,
    )
    tm.assert_series_equal(result, expected)


def test_contains_nan(any_string_dtype):
    # PR #14171
    s = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)

    result = s.str.contains("foo", na=False)
    expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series([False, False, False], dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    result = s.str.contains("foo", na=True)
    expected = Series([True, True, True], dtype=expected_dtype)
    tm.assert_series_equal(result, expected)

    result = s.str.contains("foo", na="foo")
    if any_string_dtype == "object":
        expected = Series(["foo", "foo", "foo"], dtype=np.object_)
    elif any_string_dtype == "string[pyarrow_numpy]":
        expected = Series([True, True, True], dtype=np.bool_)
    else:
        expected = Series([True, True, True], dtype="boolean")
    tm.assert_series_equal(result, expected)

    result = s.str.contains("foo")
    expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean"
    expected = Series([np.nan, np.nan, np.nan], dtype=expected_dtype)
    tm.assert_series_equal(result, expected)
-------------------------------------------------------------------------------------- +# str.startswith +# -------------------------------------------------------------------------------------- + + +@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")]) +@pytest.mark.parametrize("dtype", ["object", "category"]) +@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA]) +@pytest.mark.parametrize("na", [True, False]) +def test_startswith(pat, dtype, null_value, na): + # add category dtype parametrizations for GH-36241 + values = Series( + ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"], + dtype=dtype, + ) + + result = values.str.startswith(pat) + exp = Series([False, np.nan, True, False, False, np.nan, True]) + if dtype == "object" and null_value is pd.NA: + # GH#18463 + exp = exp.fillna(null_value) + elif dtype == "object" and null_value is None: + exp[exp.isna()] = None + tm.assert_series_equal(result, exp) + + result = values.str.startswith(pat, na=na) + exp = Series([False, na, True, False, False, na, True]) + tm.assert_series_equal(result, exp) + + # mixed + mixed = np.array( + ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0], + dtype=np.object_, + ) + rs = Series(mixed).str.startswith("f") + xp = Series([False, np.nan, False, np.nan, np.nan, True, None, np.nan, np.nan]) + tm.assert_series_equal(rs, xp) + + +@pytest.mark.parametrize("na", [None, True, False]) +def test_startswith_nullable_string_dtype(nullable_string_dtype, na): + values = Series( + ["om", None, "foo_nom", "nom", "bar_foo", None, "foo", "regex", "rege."], + dtype=nullable_string_dtype, + ) + result = values.str.startswith("foo", na=na) + exp = Series( + [False, na, True, False, False, na, True, False, False], dtype="boolean" + ) + tm.assert_series_equal(result, exp) + + result = values.str.startswith("rege.", na=na) + exp = Series( + [False, na, False, False, False, na, False, False, True], dtype="boolean" + ) + tm.assert_series_equal(result, exp) + + +# 
-------------------------------------------------------------------------------------- +# str.endswith +# -------------------------------------------------------------------------------------- + + +@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")]) +@pytest.mark.parametrize("dtype", ["object", "category"]) +@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA]) +@pytest.mark.parametrize("na", [True, False]) +def test_endswith(pat, dtype, null_value, na): + # add category dtype parametrizations for GH-36241 + values = Series( + ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"], + dtype=dtype, + ) + + result = values.str.endswith(pat) + exp = Series([False, np.nan, False, False, True, np.nan, True]) + if dtype == "object" and null_value is pd.NA: + # GH#18463 + exp = exp.fillna(null_value) + elif dtype == "object" and null_value is None: + exp[exp.isna()] = None + tm.assert_series_equal(result, exp) + + result = values.str.endswith(pat, na=na) + exp = Series([False, na, False, False, True, na, True]) + tm.assert_series_equal(result, exp) + + # mixed + mixed = np.array( + ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0], + dtype=object, + ) + rs = Series(mixed).str.endswith("f") + xp = Series([False, np.nan, False, np.nan, np.nan, False, None, np.nan, np.nan]) + tm.assert_series_equal(rs, xp) + + +@pytest.mark.parametrize("na", [None, True, False]) +def test_endswith_nullable_string_dtype(nullable_string_dtype, na): + values = Series( + ["om", None, "foo_nom", "nom", "bar_foo", None, "foo", "regex", "rege."], + dtype=nullable_string_dtype, + ) + result = values.str.endswith("foo", na=na) + exp = Series( + [False, na, False, False, True, na, True, False, False], dtype="boolean" + ) + tm.assert_series_equal(result, exp) + + result = values.str.endswith("rege.", na=na) + exp = Series( + [False, na, False, False, False, na, False, False, True], dtype="boolean" + ) + tm.assert_series_equal(result, exp) + + +# 
-------------------------------------------------------------------------------------- +# str.replace +# -------------------------------------------------------------------------------------- + + +def test_replace(any_string_dtype): + ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + + result = ser.str.replace("BAD[_]*", "", regex=True) + expected = Series(["foobar", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_replace_max_replacements(any_string_dtype): + ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + + expected = Series(["foobarBAD", np.nan], dtype=any_string_dtype) + result = ser.str.replace("BAD[_]*", "", n=1, regex=True) + tm.assert_series_equal(result, expected) + + expected = Series(["foo__barBAD", np.nan], dtype=any_string_dtype) + result = ser.str.replace("BAD", "", n=1, regex=False) + tm.assert_series_equal(result, expected) + + +def test_replace_mixed_object(): + ser = Series( + ["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0] + ) + result = Series(ser).str.replace("BAD[_]*", "", regex=True) + expected = Series( + ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object + ) + tm.assert_series_equal(result, expected) + + +def test_replace_unicode(any_string_dtype): + ser = Series([b"abcd,\xc3\xa0".decode("utf-8")], dtype=any_string_dtype) + expected = Series([b"abcd, \xc3\xa0".decode("utf-8")], dtype=any_string_dtype) + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE, regex=True) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("repl", [None, 3, {"a": "b"}]) +@pytest.mark.parametrize("data", [["a", "b", None], ["a", "b", "c", "ad"]]) +def test_replace_wrong_repl_type_raises(any_string_dtype, index_or_series, repl, data): + # https://github.com/pandas-dev/pandas/issues/13438 + msg = "repl must be a 
string or callable" + obj = index_or_series(data, dtype=any_string_dtype) + with pytest.raises(TypeError, match=msg): + obj.str.replace("a", repl) + + +def test_replace_callable(any_string_dtype): + # GH 15055 + ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + + # test with callable + repl = lambda m: m.group(0).swapcase() + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace("[a-z][A-Z]{2}", repl, n=2, regex=True) + expected = Series(["foObaD__baRbaD", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "repl", [lambda: None, lambda m, x: None, lambda m, x, y=None: None] +) +def test_replace_callable_raises(any_string_dtype, repl): + # GH 15055 + values = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + + # test with wrong number of arguments, raising an error + msg = ( + r"((takes)|(missing)) (?(2)from \d+ to )?\d+ " + r"(?(3)required )positional arguments?" 
+ ) + with pytest.raises(TypeError, match=msg): + with tm.maybe_produces_warning( + PerformanceWarning, using_pyarrow(any_string_dtype) + ): + values.str.replace("a", repl, regex=True) + + +def test_replace_callable_named_groups(any_string_dtype): + # test regex named groups + ser = Series(["Foo Bar Baz", np.nan], dtype=any_string_dtype) + pat = r"(?P\w+) (?P\w+) (?P\w+)" + repl = lambda m: m.group("middle").swapcase() + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(pat, repl, regex=True) + expected = Series(["bAR", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_replace_compiled_regex(any_string_dtype): + # GH 15446 + ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + + # test with compiled regex + pat = re.compile(r"BAD_*") + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(pat, "", regex=True) + expected = Series(["foobar", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(pat, "", n=1, regex=True) + expected = Series(["foobarBAD", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_replace_compiled_regex_mixed_object(): + pat = re.compile(r"BAD_*") + ser = Series( + ["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0] + ) + result = Series(ser).str.replace(pat, "", regex=True) + expected = Series( + ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object + ) + tm.assert_series_equal(result, expected) + + +def test_replace_compiled_regex_unicode(any_string_dtype): + ser = Series([b"abcd,\xc3\xa0".decode("utf-8")], dtype=any_string_dtype) + expected = Series([b"abcd, \xc3\xa0".decode("utf-8")], dtype=any_string_dtype) + pat = re.compile(r"(?<=\w),(?=\w)", 
flags=re.UNICODE) + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(pat, ", ", regex=True) + tm.assert_series_equal(result, expected) + + +def test_replace_compiled_regex_raises(any_string_dtype): + # case and flags provided to str.replace will have no effect + # and will produce warnings + ser = Series(["fooBAD__barBAD__bad", np.nan], dtype=any_string_dtype) + pat = re.compile(r"BAD_*") + + msg = "case and flags cannot be set when pat is a compiled regex" + + with pytest.raises(ValueError, match=msg): + ser.str.replace(pat, "", flags=re.IGNORECASE, regex=True) + + with pytest.raises(ValueError, match=msg): + ser.str.replace(pat, "", case=False, regex=True) + + with pytest.raises(ValueError, match=msg): + ser.str.replace(pat, "", case=True, regex=True) + + +def test_replace_compiled_regex_callable(any_string_dtype): + # test with callable + ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype) + repl = lambda m: m.group(0).swapcase() + pat = re.compile("[a-z][A-Z]{2}") + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace(pat, repl, n=2, regex=True) + expected = Series(["foObaD__baRbaD", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "regex,expected", [(True, ["bao", "bao", np.nan]), (False, ["bao", "foo", np.nan])] +) +def test_replace_literal(regex, expected, any_string_dtype): + # GH16808 literal replace (regex=False vs regex=True) + ser = Series(["f.o", "foo", np.nan], dtype=any_string_dtype) + expected = Series(expected, dtype=any_string_dtype) + result = ser.str.replace("f.", "ba", regex=regex) + tm.assert_series_equal(result, expected) + + +def test_replace_literal_callable_raises(any_string_dtype): + ser = Series([], dtype=any_string_dtype) + repl = lambda m: m.group(0).swapcase() + + msg = "Cannot use a callable replacement when regex=False" + with 
pytest.raises(ValueError, match=msg): + ser.str.replace("abc", repl, regex=False) + + +def test_replace_literal_compiled_raises(any_string_dtype): + ser = Series([], dtype=any_string_dtype) + pat = re.compile("[a-z][A-Z]{2}") + + msg = "Cannot use a compiled regex as replacement pattern with regex=False" + with pytest.raises(ValueError, match=msg): + ser.str.replace(pat, "", regex=False) + + +def test_replace_moar(any_string_dtype): + # PR #1179 + ser = Series( + ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"], + dtype=any_string_dtype, + ) + + result = ser.str.replace("A", "YYY") + expected = Series( + ["YYY", "B", "C", "YYYaba", "Baca", "", np.nan, "CYYYBYYY", "dog", "cat"], + dtype=any_string_dtype, + ) + tm.assert_series_equal(result, expected) + + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace("A", "YYY", case=False) + expected = Series( + [ + "YYY", + "B", + "C", + "YYYYYYbYYY", + "BYYYcYYY", + "", + np.nan, + "CYYYBYYY", + "dog", + "cYYYt", + ], + dtype=any_string_dtype, + ) + tm.assert_series_equal(result, expected) + + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace("^.a|dog", "XX-XX ", case=False, regex=True) + expected = Series( + [ + "A", + "B", + "C", + "XX-XX ba", + "XX-XX ca", + "", + np.nan, + "XX-XX BA", + "XX-XX ", + "XX-XX t", + ], + dtype=any_string_dtype, + ) + tm.assert_series_equal(result, expected) + + +def test_replace_not_case_sensitive_not_regex(any_string_dtype): + # https://github.com/pandas-dev/pandas/issues/41602 + ser = Series(["A.", "a.", "Ab", "ab", np.nan], dtype=any_string_dtype) + + with tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace("a", "c", case=False, regex=False) + expected = Series(["c.", "c.", "cb", "cb", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + with 
tm.maybe_produces_warning(PerformanceWarning, using_pyarrow(any_string_dtype)): + result = ser.str.replace("a.", "c.", case=False, regex=False) + expected = Series(["c.", "c.", "Ab", "ab", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_replace_regex(any_string_dtype): + # https://github.com/pandas-dev/pandas/pull/24809 + s = Series(["a", "b", "ac", np.nan, ""], dtype=any_string_dtype) + result = s.str.replace("^.$", "a", regex=True) + expected = Series(["a", "a", "ac", np.nan, ""], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("regex", [True, False]) +def test_replace_regex_single_character(regex, any_string_dtype): + # https://github.com/pandas-dev/pandas/pull/24809, enforced in 2.0 + # GH 24804 + s = Series(["a.b", ".", "b", np.nan, ""], dtype=any_string_dtype) + + result = s.str.replace(".", "a", regex=regex) + if regex: + expected = Series(["aaa", "a", "a", np.nan, ""], dtype=any_string_dtype) + else: + expected = Series(["aab", "a", "b", np.nan, ""], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +# -------------------------------------------------------------------------------------- +# str.match +# -------------------------------------------------------------------------------------- + + +def test_match(any_string_dtype): + # New match behavior introduced in 0.13 + expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean" + + values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + result = values.str.match(".*(BAD[_]+).*(BAD)") + expected = Series([True, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + values = Series( + ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype + ) + result = values.str.match(".*BAD[_]+.*BAD") + expected = Series([True, True, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) 
+ + result = values.str.match("BAD[_]+.*BAD") + expected = Series([False, True, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + values = Series( + ["fooBAD__barBAD", "^BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype + ) + result = values.str.match("^BAD[_]+.*BAD") + expected = Series([False, False, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = values.str.match("\\^BAD[_]+.*BAD") + expected = Series([False, True, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_match_mixed_object(): + mixed = Series( + [ + "aBAD_BAD", + np.nan, + "BAD_b_BAD", + True, + datetime.today(), + "foo", + None, + 1, + 2.0, + ] + ) + result = Series(mixed).str.match(".*(BAD[_]+).*(BAD)") + expected = Series([True, np.nan, True, np.nan, np.nan, False, None, np.nan, np.nan]) + assert isinstance(result, Series) + tm.assert_series_equal(result, expected) + + +def test_match_na_kwarg(any_string_dtype): + # GH #6609 + s = Series(["a", "b", np.nan], dtype=any_string_dtype) + + result = s.str.match("a", na=False) + expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([True, False, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = s.str.match("a") + expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([True, False, np.nan], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_match_case_kwarg(any_string_dtype): + values = Series(["ab", "AB", "abc", "ABC"], dtype=any_string_dtype) + result = values.str.match("ab", case=False) + expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([True, True, True, True], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +# 
-------------------------------------------------------------------------------------- +# str.fullmatch +# -------------------------------------------------------------------------------------- + + +def test_fullmatch(any_string_dtype): + # GH 32806 + ser = Series( + ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype + ) + result = ser.str.fullmatch(".*BAD[_]+.*BAD") + expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([True, False, np.nan, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_fullmatch_dollar_literal(any_string_dtype): + # GH 56652 + ser = Series(["foo", "foo$foo", np.nan, "foo$"], dtype=any_string_dtype) + result = ser.str.fullmatch("foo\\$") + expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([False, False, np.nan, True], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_fullmatch_na_kwarg(any_string_dtype): + ser = Series( + ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype + ) + result = ser.str.fullmatch(".*BAD[_]+.*BAD", na=False) + expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series([True, False, False, False], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_fullmatch_case_kwarg(any_string_dtype): + ser = Series(["ab", "AB", "abc", "ABC"], dtype=any_string_dtype) + expected_dtype = np.bool_ if any_string_dtype in object_pyarrow_numpy else "boolean" + + expected = Series([True, False, False, False], dtype=expected_dtype) + + result = ser.str.fullmatch("ab", case=True) + tm.assert_series_equal(result, expected) + + expected = Series([True, True, False, False], dtype=expected_dtype) + + result = ser.str.fullmatch("ab", case=False) + tm.assert_series_equal(result, expected) + + with tm.maybe_produces_warning(PerformanceWarning, 
using_pyarrow(any_string_dtype)): + result = ser.str.fullmatch("ab", flags=re.IGNORECASE) + tm.assert_series_equal(result, expected) + + +# -------------------------------------------------------------------------------------- +# str.findall +# -------------------------------------------------------------------------------------- + + +def test_findall(any_string_dtype): + ser = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"], dtype=any_string_dtype) + result = ser.str.findall("BAD[_]*") + expected = Series([["BAD__", "BAD"], np.nan, [], ["BAD"]]) + expected = _convert_na_value(ser, expected) + tm.assert_series_equal(result, expected) + + +def test_findall_mixed_object(): + ser = Series( + [ + "fooBAD__barBAD", + np.nan, + "foo", + True, + datetime.today(), + "BAD", + None, + 1, + 2.0, + ] + ) + + result = ser.str.findall("BAD[_]*") + expected = Series( + [ + ["BAD__", "BAD"], + np.nan, + [], + np.nan, + np.nan, + ["BAD"], + None, + np.nan, + np.nan, + ] + ) + + tm.assert_series_equal(result, expected) + + +# -------------------------------------------------------------------------------------- +# str.find +# -------------------------------------------------------------------------------------- + + +def test_find(any_string_dtype): + ser = Series( + ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXXX"], dtype=any_string_dtype + ) + expected_dtype = np.int64 if any_string_dtype in object_pyarrow_numpy else "Int64" + + result = ser.str.find("EF") + expected = Series([4, 3, 1, 0, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF") for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.rfind("EF") + expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF") for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), 
expected) + + result = ser.str.find("EF", 3) + expected = Series([4, 3, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF", 3) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.rfind("EF", 3) + expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF", 3) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.find("EF", 3, 6) + expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF", 3, 6) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.rfind("EF", 3, 6) + expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF", 3, 6) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + +def test_find_bad_arg_raises(any_string_dtype): + ser = Series([], dtype=any_string_dtype) + with pytest.raises(TypeError, match="expected a string object, not int"): + ser.str.find(0) + + with pytest.raises(TypeError, match="expected a string object, not int"): + ser.str.rfind(0) + + +def test_find_nan(any_string_dtype): + ser = Series( + ["ABCDEFG", np.nan, "DEFGHIJEF", np.nan, "XXXX"], dtype=any_string_dtype + ) + expected_dtype = np.float64 if any_string_dtype in object_pyarrow_numpy else "Int64" + + result = ser.str.find("EF") + expected = Series([4, np.nan, 1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = ser.str.rfind("EF") + expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + 
+ result = ser.str.find("EF", 3) + expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = ser.str.rfind("EF", 3) + expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = ser.str.find("EF", 3, 6) + expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + result = ser.str.rfind("EF", 3, 6) + expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +# -------------------------------------------------------------------------------------- +# str.translate +# -------------------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +def test_translate(index_or_series, any_string_dtype, infer_string): + obj = index_or_series( + ["abcdefg", "abcc", "cdddfg", "cdefggg"], dtype=any_string_dtype + ) + table = str.maketrans("abc", "cde") + result = obj.str.translate(table) + expected = index_or_series( + ["cdedefg", "cdee", "edddfg", "edefggg"], dtype=any_string_dtype + ) + tm.assert_equal(result, expected) + + +def test_translate_mixed_object(): + # Series with non-string values + s = Series(["a", "b", "c", 1.2]) + table = str.maketrans("abc", "cde") + expected = Series(["c", "d", "e", np.nan], dtype=object) + result = s.str.translate(table) + tm.assert_series_equal(result, expected) + + +# -------------------------------------------------------------------------------------- + + +def test_flags_kwarg(any_string_dtype): + data = { + "Dave": "dave@google.com", + "Steve": "steve@gmail.com", + "Rob": "rob@gmail.com", + "Wes": np.nan, + } + data = Series(data, dtype=any_string_dtype) + + pat = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})" + + use_pyarrow = using_pyarrow(any_string_dtype) + + result = 
data.str.extract(pat, flags=re.IGNORECASE, expand=True) + assert result.iloc[0].tolist() == ["dave", "google", "com"] + + with tm.maybe_produces_warning(PerformanceWarning, use_pyarrow): + result = data.str.match(pat, flags=re.IGNORECASE) + assert result.iloc[0] + + with tm.maybe_produces_warning(PerformanceWarning, use_pyarrow): + result = data.str.fullmatch(pat, flags=re.IGNORECASE) + assert result.iloc[0] + + result = data.str.findall(pat, flags=re.IGNORECASE) + assert result.iloc[0][0] == ("dave", "google", "com") + + result = data.str.count(pat, flags=re.IGNORECASE) + assert result.iloc[0] == 1 + + msg = "has match groups" + with tm.assert_produces_warning( + UserWarning, match=msg, raise_on_extra_warnings=not use_pyarrow + ): + result = data.str.contains(pat, flags=re.IGNORECASE) + assert result.iloc[0] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_get_dummies.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_get_dummies.py new file mode 100644 index 0000000000000000000000000000000000000000..31386e4e342ae3676a5468cfff5035686821fd52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_get_dummies.py @@ -0,0 +1,53 @@ +import numpy as np + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, +) + + +def test_get_dummies(any_string_dtype): + s = Series(["a|b", "a|c", np.nan], dtype=any_string_dtype) + result = s.str.get_dummies("|") + expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]], columns=list("abc")) + tm.assert_frame_equal(result, expected) + + s = Series(["a;b", "a", 7], dtype=any_string_dtype) + result = s.str.get_dummies(";") + expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]], columns=list("7ab")) + tm.assert_frame_equal(result, expected) + + +def test_get_dummies_index(): + # GH9980, GH8028 + idx = Index(["a|b", "a|c", "b|c"]) + result = idx.str.get_dummies("|") + + expected = MultiIndex.from_tuples( + [(1, 1, 0), (1, 0, 
1), (0, 1, 1)], names=("a", "b", "c") + ) + tm.assert_index_equal(result, expected) + + +def test_get_dummies_with_name_dummy(any_string_dtype): + # GH 12180 + # Dummies named 'name' should work as expected + s = Series(["a", "b,name", "b"], dtype=any_string_dtype) + result = s.str.get_dummies(",") + expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]], columns=["a", "b", "name"]) + tm.assert_frame_equal(result, expected) + + +def test_get_dummies_with_name_dummy_index(): + # GH 12180 + # Dummies named 'name' should work as expected + idx = Index(["a|b", "name|c", "b|name"]) + result = idx.str.get_dummies("|") + + expected = MultiIndex.from_tuples( + [(1, 1, 0, 0), (0, 0, 1, 1), (0, 1, 0, 1)], names=("a", "b", "c", "name") + ) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_split_partition.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_split_partition.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff1fc0e13ae9ed3514fe02928049d19c0c275a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_split_partition.py @@ -0,0 +1,734 @@ +from datetime import datetime +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, +) +from pandas.tests.strings import ( + _convert_na_value, + object_pyarrow_numpy, +) + + +@pytest.mark.parametrize("method", ["split", "rsplit"]) +def test_split(any_string_dtype, method): + values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + + result = getattr(values.str, method)("_") + exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) + exp = _convert_na_value(values, exp) + tm.assert_series_equal(result, exp) + + +@pytest.mark.parametrize("method", ["split", "rsplit"]) +def test_split_more_than_one_char(any_string_dtype, method): + # more than one 
char + values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype) + result = getattr(values.str, method)("__") + exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) + exp = _convert_na_value(values, exp) + tm.assert_series_equal(result, exp) + + result = getattr(values.str, method)("__", expand=False) + tm.assert_series_equal(result, exp) + + +def test_split_more_regex_split(any_string_dtype): + # regex split + values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype) + result = values.str.split("[,_]") + exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) + exp = _convert_na_value(values, exp) + tm.assert_series_equal(result, exp) + + +def test_split_regex(any_string_dtype): + # GH 43563 + # explicit regex = True split + values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype) + result = values.str.split(r"\.jpg", regex=True) + exp = Series([["xxxjpgzzz", ""]]) + tm.assert_series_equal(result, exp) + + +def test_split_regex_explicit(any_string_dtype): + # explicit regex = True split with compiled regex + regex_pat = re.compile(r".jpg") + values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype) + result = values.str.split(regex_pat) + exp = Series([["xx", "zzz", ""]]) + tm.assert_series_equal(result, exp) + + # explicit regex = False split + result = values.str.split(r"\.jpg", regex=False) + exp = Series([["xxxjpgzzz.jpg"]]) + tm.assert_series_equal(result, exp) + + # non explicit regex split, pattern length == 1 + result = values.str.split(r".") + exp = Series([["xxxjpgzzz", "jpg"]]) + tm.assert_series_equal(result, exp) + + # non explicit regex split, pattern length != 1 + result = values.str.split(r".jpg") + exp = Series([["xx", "zzz", ""]]) + tm.assert_series_equal(result, exp) + + # regex=False with pattern compiled regex raises error + with pytest.raises( + ValueError, + match="Cannot use a compiled regex as replacement pattern with regex=False", + ): + 
values.str.split(regex_pat, regex=False) + + +@pytest.mark.parametrize("expand", [None, False]) +@pytest.mark.parametrize("method", ["split", "rsplit"]) +def test_split_object_mixed(expand, method): + mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0]) + result = getattr(mixed.str, method)("_", expand=expand) + exp = Series( + [ + ["a", "b", "c"], + np.nan, + ["d", "e", "f"], + np.nan, + np.nan, + None, + np.nan, + np.nan, + ] + ) + assert isinstance(result, Series) + tm.assert_almost_equal(result, exp) + + +@pytest.mark.parametrize("method", ["split", "rsplit"]) +@pytest.mark.parametrize("n", [None, 0]) +def test_split_n(any_string_dtype, method, n): + s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype) + expected = Series([["a", "b"], pd.NA, ["b", "c"]]) + result = getattr(s.str, method)(" ", n=n) + expected = _convert_na_value(s, expected) + tm.assert_series_equal(result, expected) + + +def test_rsplit(any_string_dtype): + # regex split is not supported by rsplit + values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype) + result = values.str.rsplit("[,_]") + exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]]) + exp = _convert_na_value(values, exp) + tm.assert_series_equal(result, exp) + + +def test_rsplit_max_number(any_string_dtype): + # setting max number of splits, make sure it's from reverse + values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + result = values.str.rsplit("_", n=1) + exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]]) + exp = _convert_na_value(values, exp) + tm.assert_series_equal(result, exp) + + +def test_split_blank_string(any_string_dtype): + # expand blank split GH 20067 + values = Series([""], name="test", dtype=any_string_dtype) + result = values.str.split(expand=True) + exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df + tm.assert_frame_equal(result, exp) + + +def 
test_split_blank_string_with_non_empty(any_string_dtype): + values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype) + result = values.str.split(expand=True) + exp = DataFrame( + [ + ["a", "b", "c"], + ["a", "b", None], + [None, None, None], + [None, None, None], + ], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + +@pytest.mark.parametrize("method", ["split", "rsplit"]) +def test_split_noargs(any_string_dtype, method): + # #1859 + s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype) + result = getattr(s.str, method)() + expected = ["Travis", "Oliphant"] + assert result[1] == expected + + +@pytest.mark.parametrize( + "data, pat", + [ + (["bd asdf jfg", "kjasdflqw asdfnfk"], None), + (["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"), + (["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"), + ], +) +@pytest.mark.parametrize("n", [-1, 0]) +def test_split_maxsplit(data, pat, any_string_dtype, n): + # re.split 0, str.split -1 + s = Series(data, dtype=any_string_dtype) + + result = s.str.split(pat=pat, n=n) + xp = s.str.split(pat=pat) + tm.assert_series_equal(result, xp) + + +@pytest.mark.parametrize( + "data, pat, expected", + [ + ( + ["split once", "split once too!"], + None, + Series({0: ["split", "once"], 1: ["split", "once too!"]}), + ), + ( + ["split_once", "split_once_too!"], + "_", + Series({0: ["split", "once"], 1: ["split", "once_too!"]}), + ), + ], +) +def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype): + s = Series(data, dtype=any_string_dtype) + result = s.str.split(pat=pat, n=1) + tm.assert_series_equal(expected, result, check_index_type=False) + + +def test_split_to_dataframe_no_splits(any_string_dtype): + s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype) + result = s.str.split("_", expand=True) + exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)}) + tm.assert_frame_equal(result, exp) + + +def test_split_to_dataframe(any_string_dtype): + s 
= Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype) + result = s.str.split("_", expand=True) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + +def test_split_to_dataframe_unequal_splits(any_string_dtype): + s = Series( + ["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype + ) + result = s.str.split("_", expand=True) + exp = DataFrame( + { + 0: ["some", "one"], + 1: ["unequal", "of"], + 2: ["splits", "these"], + 3: [None, "things"], + 4: [None, "is"], + 5: [None, "not"], + }, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + +def test_split_to_dataframe_with_index(any_string_dtype): + s = Series( + ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype + ) + result = s.str.split("_", expand=True) + exp = DataFrame( + {0: ["some", "with"], 1: ["splits", "index"]}, + index=["preserve", "me"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + with pytest.raises(ValueError, match="expand must be"): + s.str.split("_", expand="not_a_boolean") + + +def test_split_to_multiindex_expand_no_splits(): + # https://github.com/pandas-dev/pandas/issues/23677 + + idx = Index(["nosplit", "alsonosplit", np.nan]) + result = idx.str.split("_", expand=True) + exp = idx + tm.assert_index_equal(result, exp) + assert result.nlevels == 1 + + +def test_split_to_multiindex_expand(): + idx = Index(["some_equal_splits", "with_no_nans", np.nan, None]) + result = idx.str.split("_", expand=True) + exp = MultiIndex.from_tuples( + [ + ("some", "equal", "splits"), + ("with", "no", "nans"), + [np.nan, np.nan, np.nan], + [None, None, None], + ] + ) + tm.assert_index_equal(result, exp) + assert result.nlevels == 3 + + +def test_split_to_multiindex_expand_unequal_splits(): + idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None]) + result = idx.str.split("_", 
expand=True) + exp = MultiIndex.from_tuples( + [ + ("some", "unequal", "splits", np.nan, np.nan, np.nan), + ("one", "of", "these", "things", "is", "not"), + (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan), + (None, None, None, None, None, None), + ] + ) + tm.assert_index_equal(result, exp) + assert result.nlevels == 6 + + with pytest.raises(ValueError, match="expand must be"): + idx.str.split("_", expand="not_a_boolean") + + +def test_rsplit_to_dataframe_expand_no_splits(any_string_dtype): + s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype) + result = s.str.rsplit("_", expand=True) + exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype) + tm.assert_frame_equal(result, exp) + + +def test_rsplit_to_dataframe_expand(any_string_dtype): + s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype) + result = s.str.rsplit("_", expand=True) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + result = s.str.rsplit("_", expand=True, n=2) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + result = s.str.rsplit("_", expand=True, n=1) + exp = DataFrame( + {0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, exp) + + +def test_rsplit_to_dataframe_expand_with_index(any_string_dtype): + s = Series( + ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype + ) + result = s.str.rsplit("_", expand=True) + exp = DataFrame( + {0: ["some", "with"], 1: ["splits", "index"]}, + index=["preserve", "me"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, exp) + + +def test_rsplit_to_multiindex_expand_no_split(): + idx = Index(["nosplit", "alsonosplit"]) + result = idx.str.rsplit("_", expand=True) + exp = idx + 
tm.assert_index_equal(result, exp) + assert result.nlevels == 1 + + +def test_rsplit_to_multiindex_expand(): + idx = Index(["some_equal_splits", "with_no_nans"]) + result = idx.str.rsplit("_", expand=True) + exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")]) + tm.assert_index_equal(result, exp) + assert result.nlevels == 3 + + +def test_rsplit_to_multiindex_expand_n(): + idx = Index(["some_equal_splits", "with_no_nans"]) + result = idx.str.rsplit("_", expand=True, n=1) + exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")]) + tm.assert_index_equal(result, exp) + assert result.nlevels == 2 + + +def test_split_nan_expand(any_string_dtype): + # gh-18450 + s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype) + result = s.str.split(",", expand=True) + exp = DataFrame( + [["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, exp) + + # check that these are actually np.nan/pd.NA and not None + # TODO see GH 18463 + # tm.assert_frame_equal does not differentiate + if any_string_dtype in object_pyarrow_numpy: + assert all(np.isnan(x) for x in result.iloc[1]) + else: + assert all(x is pd.NA for x in result.iloc[1]) + + +def test_split_with_name_series(any_string_dtype): + # GH 12617 + + # should preserve name + s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype) + res = s.str.split(",") + exp = Series([["a", "b"], ["c", "d"]], name="xxx") + tm.assert_series_equal(res, exp) + + res = s.str.split(",", expand=True) + exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype) + tm.assert_frame_equal(res, exp) + + +def test_split_with_name_index(): + # GH 12617 + idx = Index(["a,b", "c,d"], name="xxx") + res = idx.str.split(",") + exp = Index([["a", "b"], ["c", "d"]], name="xxx") + assert res.nlevels == 1 + tm.assert_index_equal(res, exp) + + res = idx.str.split(",", expand=True) + exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")]) + assert 
res.nlevels == 2 + tm.assert_index_equal(res, exp) + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + [ + ("a", "__", "b__c"), + ("c", "__", "d__e"), + np.nan, + ("f", "__", "g__h"), + None, + ], + ], + [ + "rpartition", + [ + ("a__b", "__", "c"), + ("c__d", "__", "e"), + np.nan, + ("f__g", "__", "h"), + None, + ], + ], + ], +) +def test_partition_series_more_than_one_char(method, exp, any_string_dtype): + # https://github.com/pandas-dev/pandas/issues/23558 + # more than one char + s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None], dtype=any_string_dtype) + result = getattr(s.str, method)("__", expand=False) + expected = Series(exp) + expected = _convert_na_value(s, expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + [("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None], + ], + [ + "rpartition", + [("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None], + ], + ], +) +def test_partition_series_none(any_string_dtype, method, exp): + # https://github.com/pandas-dev/pandas/issues/23558 + # None + s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype) + result = getattr(s.str, method)(expand=False) + expected = Series(exp) + expected = _convert_na_value(s, expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + [("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None], + ], + [ + "rpartition", + [("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None], + ], + ], +) +def test_partition_series_not_split(any_string_dtype, method, exp): + # https://github.com/pandas-dev/pandas/issues/23558 + # Not split + s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype) + result = getattr(s.str, method)("_", expand=False) + expected = Series(exp) + expected = _convert_na_value(s, expected) + tm.assert_series_equal(result, 
expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + [("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")], + ], + [ + "rpartition", + [("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")], + ], + ], +) +def test_partition_series_unicode(any_string_dtype, method, exp): + # https://github.com/pandas-dev/pandas/issues/23558 + # unicode + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + + result = getattr(s.str, method)("_", expand=False) + expected = Series(exp) + expected = _convert_na_value(s, expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["partition", "rpartition"]) +def test_partition_series_stdlib(any_string_dtype, method): + # https://github.com/pandas-dev/pandas/issues/23558 + # compare to standard lib + s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype) + result = getattr(s.str, method)("_", expand=False).tolist() + assert result == [getattr(v, method)("_") for v in s] + + +@pytest.mark.parametrize( + "method, expand, exp, exp_levels", + [ + [ + "partition", + False, + np.array( + [("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None], + dtype=object, + ), + 1, + ], + [ + "rpartition", + False, + np.array( + [("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None], + dtype=object, + ), + 1, + ], + ], +) +def test_partition_index(method, expand, exp, exp_levels): + # https://github.com/pandas-dev/pandas/issues/23558 + + values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None]) + + result = getattr(values.str, method)("_", expand=expand) + exp = Index(exp) + tm.assert_index_equal(result, exp) + assert result.nlevels == exp_levels + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + { + 0: ["a", "c", np.nan, "f", None], + 1: ["_", "_", np.nan, "_", None], + 2: ["b_c", "d_e", np.nan, "g_h", None], + }, + ], + [ + "rpartition", + { + 0: ["a_b", "c_d", np.nan, "f_g", 
None], + 1: ["_", "_", np.nan, "_", None], + 2: ["c", "e", np.nan, "h", None], + }, + ], + ], +) +def test_partition_to_dataframe(any_string_dtype, method, exp): + # https://github.com/pandas-dev/pandas/issues/23558 + + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype) + result = getattr(s.str, method)("_") + expected = DataFrame( + exp, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + [ + "partition", + { + 0: ["a", "c", np.nan, "f", None], + 1: ["_", "_", np.nan, "_", None], + 2: ["b_c", "d_e", np.nan, "g_h", None], + }, + ], + [ + "rpartition", + { + 0: ["a_b", "c_d", np.nan, "f_g", None], + 1: ["_", "_", np.nan, "_", None], + 2: ["c", "e", np.nan, "h", None], + }, + ], + ], +) +def test_partition_to_dataframe_from_series(any_string_dtype, method, exp): + # https://github.com/pandas-dev/pandas/issues/23558 + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype) + result = getattr(s.str, method)("_", expand=True) + expected = DataFrame( + exp, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_partition_with_name(any_string_dtype): + # GH 12617 + + s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype) + result = s.str.partition(",") + expected = DataFrame( + {0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + +def test_partition_with_name_expand(any_string_dtype): + # GH 12617 + # should preserve name + s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype) + result = s.str.partition(",", expand=False) + expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx") + tm.assert_series_equal(result, expected) + + +def test_partition_index_with_name(): + idx = Index(["a,b", "c,d"], name="xxx") + result = idx.str.partition(",") + expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")]) + assert 
result.nlevels == 3 + tm.assert_index_equal(result, expected) + + +def test_partition_index_with_name_expand_false(): + idx = Index(["a,b", "c,d"], name="xxx") + # should preserve name + result = idx.str.partition(",", expand=False) + expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx") + assert result.nlevels == 1 + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("method", ["partition", "rpartition"]) +def test_partition_sep_kwarg(any_string_dtype, method): + # GH 22676; depr kwarg "pat" in favor of "sep" + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + + expected = getattr(s.str, method)(sep="_") + result = getattr(s.str, method)("_") + tm.assert_frame_equal(result, expected) + + +def test_get(): + ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) + result = ser.str.split("_").str.get(1) + expected = Series(["b", "d", np.nan, "g"], dtype=object) + tm.assert_series_equal(result, expected) + + +def test_get_mixed_object(): + ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0]) + result = ser.str.split("_").str.get(1) + expected = Series( + ["b", np.nan, "d", np.nan, np.nan, None, np.nan, np.nan], dtype=object + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("idx", [2, -3]) +def test_get_bounds(idx): + ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"]) + result = ser.str.split("_").str.get(idx) + expected = Series(["3", "8", np.nan], dtype=object) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "idx, exp", [[2, [3, 3, np.nan, "b"]], [-1, [3, 3, np.nan, np.nan]]] +) +def test_get_complex(idx, exp): + # GH 20671, getting value not in dict raising `KeyError` + ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}]) + + result = ser.str.get(idx) + expected = Series(exp) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("to_type", [tuple, list, np.array]) +def 
test_get_complex_nested(to_type): + ser = Series([to_type([to_type([1, 2])])]) + + result = ser.str.get(0) + expected = Series([to_type([1, 2])]) + tm.assert_series_equal(result, expected) + + result = ser.str.get(1) + expected = Series([np.nan]) + tm.assert_series_equal(result, expected) + + +def test_get_strings(any_string_dtype): + ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype) + result = ser.str.get(2) + expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_string_array.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_string_array.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3f368afea5ec035ff11f609358e4377afb87fd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_string_array.py @@ -0,0 +1,112 @@ +import numpy as np +import pytest + +from pandas._libs import lib + +from pandas import ( + NA, + DataFrame, + Series, + _testing as tm, + option_context, +) + + +@pytest.mark.filterwarnings("ignore:Falling back") +def test_string_array(nullable_string_dtype, any_string_method): + method_name, args, kwargs = any_string_method + + data = ["a", "bb", np.nan, "ccc"] + a = Series(data, dtype=object) + b = Series(data, dtype=nullable_string_dtype) + + if method_name == "decode": + with pytest.raises(TypeError, match="a bytes-like object is required"): + getattr(b.str, method_name)(*args, **kwargs) + return + + expected = getattr(a.str, method_name)(*args, **kwargs) + result = getattr(b.str, method_name)(*args, **kwargs) + + if isinstance(expected, Series): + if expected.dtype == "object" and lib.is_string_array( + expected.dropna().values, + ): + assert result.dtype == nullable_string_dtype + result = result.astype(object) + + elif expected.dtype == "object" and lib.is_bool_array( + expected.values, skipna=True + ): + assert 
result.dtype == "boolean" + result = result.astype(object) + + elif expected.dtype == "bool": + assert result.dtype == "boolean" + result = result.astype("bool") + + elif expected.dtype == "float" and expected.isna().any(): + assert result.dtype == "Int64" + result = result.astype("float") + + if expected.dtype == object: + # GH#18463 + expected[expected.isna()] = NA + + elif isinstance(expected, DataFrame): + columns = expected.select_dtypes(include="object").columns + assert all(result[columns].dtypes == nullable_string_dtype) + result[columns] = result[columns].astype(object) + with option_context("future.no_silent_downcasting", True): + expected[columns] = expected[columns].fillna(NA) # GH#18463 + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "method,expected", + [ + ("count", [2, None]), + ("find", [0, None]), + ("index", [0, None]), + ("rindex", [2, None]), + ], +) +def test_string_array_numeric_integer_array(nullable_string_dtype, method, expected): + s = Series(["aba", None], dtype=nullable_string_dtype) + result = getattr(s.str, method)("a") + expected = Series(expected, dtype="Int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method,expected", + [ + ("isdigit", [False, None, True]), + ("isalpha", [True, None, False]), + ("isalnum", [True, None, True]), + ("isnumeric", [False, None, True]), + ], +) +def test_string_array_boolean_array(nullable_string_dtype, method, expected): + s = Series(["a", None, "1"], dtype=nullable_string_dtype) + result = getattr(s.str, method)() + expected = Series(expected, dtype="boolean") + tm.assert_series_equal(result, expected) + + +def test_string_array_extract(nullable_string_dtype): + # https://github.com/pandas-dev/pandas/issues/30969 + # Only expand=False & multiple groups was failing + + a = Series(["a1", "b2", "cc"], dtype=nullable_string_dtype) + b = Series(["a1", "b2", "cc"], dtype="object") + pat = r"(\w)(\d)" + + result = a.str.extract(pat, expand=False) + 
expected = b.str.extract(pat, expand=False) + expected = expected.fillna(NA) # GH#18463 + assert all(result.dtypes == nullable_string_dtype) + + result = result.astype(object) + tm.assert_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_strings.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_strings.py new file mode 100644 index 0000000000000000000000000000000000000000..f662dfd7e2b14cc7016e58ab741c92ef5b29abe0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/strings/test_strings.py @@ -0,0 +1,720 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.strings.accessor import StringMethods +from pandas.tests.strings import object_pyarrow_numpy + + +@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])]) +def test_startswith_endswith_non_str_patterns(pattern): + # GH3485 + ser = Series(["foo", "bar"]) + msg = f"expected a string or tuple, not {type(pattern).__name__}" + with pytest.raises(TypeError, match=msg): + ser.str.startswith(pattern) + with pytest.raises(TypeError, match=msg): + ser.str.endswith(pattern) + + +def test_iter_raises(): + # GH 54173 + ser = Series(["foo", "bar"]) + with pytest.raises(TypeError, match="'StringMethods' object is not iterable"): + iter(ser.str) + + +# test integer/float dtypes (inferred by constructor) and mixed + + +def test_count(any_string_dtype): + ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype) + result = ser.str.count("f[o]+") + expected_dtype = np.float64 if any_string_dtype in object_pyarrow_numpy else "Int64" + expected = Series([1, 2, np.nan, 4], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_count_mixed_object(): + ser = Series( + ["a", np.nan, "b", True, datetime.today(), "foo", 
None, 1, 2.0], + dtype=object, + ) + result = ser.str.count("a") + expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + +def test_repeat(any_string_dtype): + ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype) + + result = ser.str.repeat(3) + expected = Series( + ["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + result = ser.str.repeat([1, 2, 3, 4, 5, 6]) + expected = Series( + ["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) + + +def test_repeat_mixed_object(): + ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0]) + result = ser.str.repeat(3) + expected = Series( + ["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]]) +def test_repeat_with_null(any_string_dtype, arg, repeat): + # GH: 31632 + ser = Series(["a", arg], dtype=any_string_dtype) + result = ser.str.repeat([3, repeat]) + expected = Series(["aaa", None], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_empty_str_methods(any_string_dtype): + empty_str = empty = Series(dtype=any_string_dtype) + if any_string_dtype in object_pyarrow_numpy: + empty_int = Series(dtype="int64") + empty_bool = Series(dtype=bool) + else: + empty_int = Series(dtype="Int64") + empty_bool = Series(dtype="boolean") + empty_object = Series(dtype=object) + empty_bytes = Series(dtype=object) + empty_df = DataFrame() + + # GH7241 + # (extract) on empty series + + tm.assert_series_equal(empty_str, empty.str.cat(empty)) + assert "" == empty.str.cat() + tm.assert_series_equal(empty_str, empty.str.title()) + tm.assert_series_equal(empty_int, empty.str.count("a")) + tm.assert_series_equal(empty_bool, 
empty.str.contains("a")) + tm.assert_series_equal(empty_bool, empty.str.startswith("a")) + tm.assert_series_equal(empty_bool, empty.str.endswith("a")) + tm.assert_series_equal(empty_str, empty.str.lower()) + tm.assert_series_equal(empty_str, empty.str.upper()) + tm.assert_series_equal(empty_str, empty.str.replace("a", "b")) + tm.assert_series_equal(empty_str, empty.str.repeat(3)) + tm.assert_series_equal(empty_bool, empty.str.match("^a")) + tm.assert_frame_equal( + DataFrame(columns=[0], dtype=any_string_dtype), + empty.str.extract("()", expand=True), + ) + tm.assert_frame_equal( + DataFrame(columns=[0, 1], dtype=any_string_dtype), + empty.str.extract("()()", expand=True), + ) + tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False)) + tm.assert_frame_equal( + DataFrame(columns=[0, 1], dtype=any_string_dtype), + empty.str.extract("()()", expand=False), + ) + tm.assert_frame_equal(empty_df.set_axis([], axis=1), empty.str.get_dummies()) + tm.assert_series_equal(empty_str, empty_str.str.join("")) + tm.assert_series_equal(empty_int, empty.str.len()) + tm.assert_series_equal(empty_object, empty_str.str.findall("a")) + tm.assert_series_equal(empty_int, empty.str.find("a")) + tm.assert_series_equal(empty_int, empty.str.rfind("a")) + tm.assert_series_equal(empty_str, empty.str.pad(42)) + tm.assert_series_equal(empty_str, empty.str.center(42)) + tm.assert_series_equal(empty_object, empty.str.split("a")) + tm.assert_series_equal(empty_object, empty.str.rsplit("a")) + tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False)) + tm.assert_frame_equal(empty_df, empty.str.partition("a")) + tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False)) + tm.assert_frame_equal(empty_df, empty.str.rpartition("a")) + tm.assert_series_equal(empty_str, empty.str.slice(stop=1)) + tm.assert_series_equal(empty_str, empty.str.slice(step=1)) + tm.assert_series_equal(empty_str, empty.str.strip()) + tm.assert_series_equal(empty_str, 
empty.str.lstrip()) + tm.assert_series_equal(empty_str, empty.str.rstrip()) + tm.assert_series_equal(empty_str, empty.str.wrap(42)) + tm.assert_series_equal(empty_str, empty.str.get(0)) + tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii")) + tm.assert_series_equal(empty_bytes, empty.str.encode("ascii")) + # ismethods should always return boolean (GH 29624) + tm.assert_series_equal(empty_bool, empty.str.isalnum()) + tm.assert_series_equal(empty_bool, empty.str.isalpha()) + tm.assert_series_equal(empty_bool, empty.str.isdigit()) + tm.assert_series_equal(empty_bool, empty.str.isspace()) + tm.assert_series_equal(empty_bool, empty.str.islower()) + tm.assert_series_equal(empty_bool, empty.str.isupper()) + tm.assert_series_equal(empty_bool, empty.str.istitle()) + tm.assert_series_equal(empty_bool, empty.str.isnumeric()) + tm.assert_series_equal(empty_bool, empty.str.isdecimal()) + tm.assert_series_equal(empty_str, empty.str.capitalize()) + tm.assert_series_equal(empty_str, empty.str.swapcase()) + tm.assert_series_equal(empty_str, empty.str.normalize("NFC")) + + table = str.maketrans("a", "b") + tm.assert_series_equal(empty_str, empty.str.translate(table)) + + +@pytest.mark.parametrize( + "method, expected", + [ + ("isalnum", [True, True, True, True, True, False, True, True, False, False]), + ("isalpha", [True, True, True, False, False, False, True, False, False, False]), + ( + "isdigit", + [False, False, False, True, False, False, False, True, False, False], + ), + ( + "isnumeric", + [False, False, False, True, False, False, False, True, False, False], + ), + ( + "isspace", + [False, False, False, False, False, False, False, False, False, True], + ), + ( + "islower", + [False, True, False, False, False, False, False, False, False, False], + ), + ( + "isupper", + [True, False, False, False, True, False, True, False, False, False], + ), + ( + "istitle", + [True, False, True, False, True, False, False, False, False, False], + ), + ], +) +def 
test_ismethods(method, expected, any_string_dtype): + ser = Series( + ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype + ) + expected_dtype = "bool" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series(expected, dtype=expected_dtype) + result = getattr(ser.str, method)() + tm.assert_series_equal(result, expected) + + # compare with standard library + expected = [getattr(item, method)() for item in ser] + assert list(result) == expected + + +@pytest.mark.parametrize( + "method, expected", + [ + ("isnumeric", [False, True, True, False, True, True, False]), + ("isdecimal", [False, True, False, False, False, True, False]), + ], +) +def test_isnumeric_unicode(method, expected, any_string_dtype): + # 0x00bc: ¼ VULGAR FRACTION ONE QUARTER + # 0x2605: ★ not number + # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY + # 0xFF13: 3 Em 3 # noqa: RUF003 + ser = Series( + ["A", "3", "¼", "★", "፸", "3", "four"], dtype=any_string_dtype # noqa: RUF001 + ) + expected_dtype = "bool" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series(expected, dtype=expected_dtype) + result = getattr(ser.str, method)() + tm.assert_series_equal(result, expected) + + # compare with standard library + expected = [getattr(item, method)() for item in ser] + assert list(result) == expected + + +@pytest.mark.parametrize( + "method, expected", + [ + ("isnumeric", [False, np.nan, True, False, np.nan, True, False]), + ("isdecimal", [False, np.nan, False, False, np.nan, True, False]), + ], +) +def test_isnumeric_unicode_missing(method, expected, any_string_dtype): + values = ["A", np.nan, "¼", "★", np.nan, "3", "four"] # noqa: RUF001 + ser = Series(values, dtype=any_string_dtype) + expected_dtype = "object" if any_string_dtype in object_pyarrow_numpy else "boolean" + expected = Series(expected, dtype=expected_dtype) + result = getattr(ser.str, method)() + tm.assert_series_equal(result, expected) + + +def 
test_spilt_join_roundtrip(any_string_dtype): + ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + result = ser.str.split("_").str.join("_") + expected = ser.astype(object) + tm.assert_series_equal(result, expected) + + +def test_spilt_join_roundtrip_mixed_object(): + ser = Series( + ["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0] + ) + result = ser.str.split("_").str.join("_") + expected = Series( + ["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", None, np.nan, np.nan], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + +def test_len(any_string_dtype): + ser = Series( + ["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"], + dtype=any_string_dtype, + ) + result = ser.str.len() + expected_dtype = "float64" if any_string_dtype in object_pyarrow_numpy else "Int64" + expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_len_mixed(): + ser = Series( + ["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0] + ) + result = ser.str.len() + expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method,sub,start,end,expected", + [ + ("index", "EF", None, None, [4, 3, 1, 0]), + ("rindex", "EF", None, None, [4, 5, 7, 4]), + ("index", "EF", 3, None, [4, 3, 7, 4]), + ("rindex", "EF", 3, None, [4, 5, 7, 4]), + ("index", "E", 4, 8, [4, 5, 7, 4]), + ("rindex", "E", 0, 5, [4, 3, 1, 4]), + ], +) +def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected): + obj = index_or_series( + ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype + ) + expected_dtype = np.int64 if any_string_dtype in object_pyarrow_numpy else "Int64" + expected = index_or_series(expected, dtype=expected_dtype) + + result = getattr(obj.str, method)(sub, start, end) + + if index_or_series is Series: 
+ tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + # compare with standard library + expected = [getattr(item, method)(sub, start, end) for item in obj] + assert list(result) == expected + + +def test_index_not_found_raises(index_or_series, any_string_dtype): + obj = index_or_series( + ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype + ) + with pytest.raises(ValueError, match="substring not found"): + obj.str.index("DE") + + +@pytest.mark.parametrize("method", ["index", "rindex"]) +def test_index_wrong_type_raises(index_or_series, any_string_dtype, method): + obj = index_or_series([], dtype=any_string_dtype) + msg = "expected a string object, not int" + + with pytest.raises(TypeError, match=msg): + getattr(obj.str, method)(0) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["index", [1, 1, 0]], + ["rindex", [3, 1, 2]], + ], +) +def test_index_missing(any_string_dtype, method, exp): + ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype) + expected_dtype = np.float64 if any_string_dtype in object_pyarrow_numpy else "Int64" + + result = getattr(ser.str, method)("b") + expected = Series(exp + [np.nan], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +def test_pipe_failures(any_string_dtype): + # #2119 + ser = Series(["A|B|C"], dtype=any_string_dtype) + + result = ser.str.split("|") + expected = Series([["A", "B", "C"]], dtype=object) + tm.assert_series_equal(result, expected) + + result = ser.str.replace("|", " ", regex=False) + expected = Series(["A B C"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, step, expected", + [ + (2, 5, None, ["foo", "bar", np.nan, "baz"]), + (0, 3, -1, ["", "", np.nan, ""]), + (None, None, -1, ["owtoofaa", "owtrabaa", np.nan, "xuqzabaa"]), + (3, 10, 2, ["oto", "ato", np.nan, "aqx"]), + (3, 0, -1, ["ofa", "aba", np.nan, "aba"]), + ], +) +def test_slice(start, stop, 
step, expected, any_string_dtype): + ser = Series(["aafootwo", "aabartwo", np.nan, "aabazqux"], dtype=any_string_dtype) + result = ser.str.slice(start, stop, step) + expected = Series(expected, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, step, expected", + [ + (2, 5, None, ["foo", np.nan, "bar", np.nan, np.nan, None, np.nan, np.nan]), + (4, 1, -1, ["oof", np.nan, "rab", np.nan, np.nan, None, np.nan, np.nan]), + ], +) +def test_slice_mixed_object(start, stop, step, expected): + ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0]) + result = ser.str.slice(start, stop, step) + expected = Series(expected, dtype=object) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start,stop,repl,expected", + [ + (2, 3, None, ["shrt", "a it longer", "evnlongerthanthat", "", np.nan]), + (2, 3, "z", ["shzrt", "a zit longer", "evznlongerthanthat", "z", np.nan]), + (2, 2, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]), + (2, 1, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]), + (-1, None, "z", ["shorz", "a bit longez", "evenlongerthanthaz", "z", np.nan]), + (None, -2, "z", ["zrt", "zer", "zat", "z", np.nan]), + (6, 8, "z", ["shortz", "a bit znger", "evenlozerthanthat", "z", np.nan]), + (-10, 3, "z", ["zrt", "a zit longer", "evenlongzerthanthat", "z", np.nan]), + ], +) +def test_slice_replace(start, stop, repl, expected, any_string_dtype): + ser = Series( + ["short", "a bit longer", "evenlongerthanthat", "", np.nan], + dtype=any_string_dtype, + ) + expected = Series(expected, dtype=any_string_dtype) + result = ser.str.slice_replace(start, stop, repl) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["strip", ["aa", "bb", np.nan, "cc"]], + ["lstrip", ["aa ", "bb \n", np.nan, "cc "]], + ["rstrip", [" aa", " bb", np.nan, "cc"]], + ], +) +def 
test_strip_lstrip_rstrip(any_string_dtype, method, exp): + ser = Series([" aa ", " bb \n", np.nan, "cc "], dtype=any_string_dtype) + + result = getattr(ser.str, method)() + expected = Series(exp, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["strip", ["aa", np.nan, "bb"]], + ["lstrip", ["aa ", np.nan, "bb \t\n"]], + ["rstrip", [" aa", np.nan, " bb"]], + ], +) +def test_strip_lstrip_rstrip_mixed_object(method, exp): + ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0]) + + result = getattr(ser.str, method)() + expected = Series(exp + [np.nan, np.nan, None, np.nan, np.nan], dtype=object) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["strip", ["ABC", " BNSD", "LDFJH "]], + ["lstrip", ["ABCxx", " BNSD", "LDFJH xx"]], + ["rstrip", ["xxABC", "xx BNSD", "LDFJH "]], + ], +) +def test_strip_lstrip_rstrip_args(any_string_dtype, method, exp): + ser = Series(["xxABCxx", "xx BNSD", "LDFJH xx"], dtype=any_string_dtype) + + result = getattr(ser.str, method)("x") + expected = Series(exp, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "prefix, expected", [("a", ["b", " b c", "bc"]), ("ab", ["", "a b c", "bc"])] +) +def test_removeprefix(any_string_dtype, prefix, expected): + ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype) + result = ser.str.removeprefix(prefix) + ser_expected = Series(expected, dtype=any_string_dtype) + tm.assert_series_equal(result, ser_expected) + + +@pytest.mark.parametrize( + "suffix, expected", [("c", ["ab", "a b ", "b"]), ("bc", ["ab", "a b c", ""])] +) +def test_removesuffix(any_string_dtype, suffix, expected): + ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype) + result = ser.str.removesuffix(suffix) + ser_expected = Series(expected, dtype=any_string_dtype) + tm.assert_series_equal(result, ser_expected) + + +def 
test_string_slice_get_syntax(any_string_dtype): + ser = Series( + ["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", np.nan, "CYYYBYYY", "dog", "cYYYt"], + dtype=any_string_dtype, + ) + + result = ser.str[0] + expected = ser.str.get(0) + tm.assert_series_equal(result, expected) + + result = ser.str[:3] + expected = ser.str.slice(stop=3) + tm.assert_series_equal(result, expected) + + result = ser.str[2::-1] + expected = ser.str.slice(start=2, step=-1) + tm.assert_series_equal(result, expected) + + +def test_string_slice_out_of_bounds_nested(): + ser = Series([(1, 2), (1,), (3, 4, 5)]) + result = ser.str[1] + expected = Series([2, np.nan, 4]) + tm.assert_series_equal(result, expected) + + +def test_string_slice_out_of_bounds(any_string_dtype): + ser = Series(["foo", "b", "ba"], dtype=any_string_dtype) + result = ser.str[1] + expected = Series(["o", np.nan, "a"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_encode_decode(any_string_dtype): + ser = Series(["a", "b", "a\xe4"], dtype=any_string_dtype).str.encode("utf-8") + result = ser.str.decode("utf-8") + expected = ser.map(lambda x: x.decode("utf-8")).astype(object) + tm.assert_series_equal(result, expected) + + +def test_encode_errors_kwarg(any_string_dtype): + ser = Series(["a", "b", "a\x9d"], dtype=any_string_dtype) + + msg = ( + r"'charmap' codec can't encode character '\\x9d' in position 1: " + "character maps to " + ) + with pytest.raises(UnicodeEncodeError, match=msg): + ser.str.encode("cp1252") + + result = ser.str.encode("cp1252", "ignore") + expected = ser.map(lambda x: x.encode("cp1252", "ignore")) + tm.assert_series_equal(result, expected) + + +def test_decode_errors_kwarg(): + ser = Series([b"a", b"b", b"a\x9d"]) + + msg = ( + "'charmap' codec can't decode byte 0x9d in position 1: " + "character maps to " + ) + with pytest.raises(UnicodeDecodeError, match=msg): + ser.str.decode("cp1252") + + result = ser.str.decode("cp1252", "ignore") + expected = ser.map(lambda x: 
x.decode("cp1252", "ignore")).astype(object) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "form, expected", + [ + ("NFKC", ["ABC", "ABC", "123", np.nan, "アイエ"]), + ("NFC", ["ABC", "ABC", "123", np.nan, "アイエ"]), # noqa: RUF001 + ], +) +def test_normalize(form, expected, any_string_dtype): + ser = Series( + ["ABC", "ABC", "123", np.nan, "アイエ"], # noqa: RUF001 + index=["a", "b", "c", "d", "e"], + dtype=any_string_dtype, + ) + expected = Series(expected, index=["a", "b", "c", "d", "e"], dtype=any_string_dtype) + result = ser.str.normalize(form) + tm.assert_series_equal(result, expected) + + +def test_normalize_bad_arg_raises(any_string_dtype): + ser = Series( + ["ABC", "ABC", "123", np.nan, "アイエ"], # noqa: RUF001 + index=["a", "b", "c", "d", "e"], + dtype=any_string_dtype, + ) + with pytest.raises(ValueError, match="invalid normalization form"): + ser.str.normalize("xxx") + + +def test_normalize_index(): + idx = Index(["ABC", "123", "アイエ"]) # noqa: RUF001 + expected = Index(["ABC", "123", "アイエ"]) + result = idx.str.normalize("NFKC") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "values,inferred_type", + [ + (["a", "b"], "string"), + (["a", "b", 1], "mixed-integer"), + (["a", "b", 1.3], "mixed"), + (["a", "b", 1.3, 1], "mixed-integer"), + (["aa", datetime(2011, 1, 1)], "mixed"), + ], +) +def test_index_str_accessor_visibility(values, inferred_type, index_or_series): + obj = index_or_series(values) + if index_or_series is Index: + assert obj.inferred_type == inferred_type + + assert isinstance(obj.str, StringMethods) + + +@pytest.mark.parametrize( + "values,inferred_type", + [ + ([1, np.nan], "floating"), + ([datetime(2011, 1, 1)], "datetime64"), + ([timedelta(1)], "timedelta64"), + ], +) +def test_index_str_accessor_non_string_values_raises( + values, inferred_type, index_or_series +): + obj = index_or_series(values) + if index_or_series is Index: + assert obj.inferred_type == inferred_type + + msg = "Can only 
use .str accessor with string values" + with pytest.raises(AttributeError, match=msg): + obj.str + + +def test_index_str_accessor_multiindex_raises(): + # MultiIndex has mixed dtype, but not allow to use accessor + idx = MultiIndex.from_tuples([("a", "b"), ("a", "b")]) + assert idx.inferred_type == "mixed" + + msg = "Can only use .str accessor with Index, not MultiIndex" + with pytest.raises(AttributeError, match=msg): + idx.str + + +def test_str_accessor_no_new_attributes(any_string_dtype): + # https://github.com/pandas-dev/pandas/issues/10673 + ser = Series(list("aabbcde"), dtype=any_string_dtype) + with pytest.raises(AttributeError, match="You cannot add any new attribute"): + ser.str.xlabel = "a" + + +def test_cat_on_bytes_raises(): + lhs = Series(np.array(list("abc"), "S1").astype(object)) + rhs = Series(np.array(list("def"), "S1").astype(object)) + msg = "Cannot use .str.cat with values of inferred dtype 'bytes'" + with pytest.raises(TypeError, match=msg): + lhs.str.cat(rhs) + + +def test_str_accessor_in_apply_func(): + # https://github.com/pandas-dev/pandas/issues/38979 + df = DataFrame(zip("abc", "def")) + expected = Series(["A/D", "B/E", "C/F"]) + result = df.apply(lambda f: "/".join(f.str.upper()), axis=1) + tm.assert_series_equal(result, expected) + + +def test_zfill(): + # https://github.com/pandas-dev/pandas/issues/20868 + value = Series(["-1", "1", "1000", 10, np.nan]) + expected = Series(["-01", "001", "1000", np.nan, np.nan], dtype=object) + tm.assert_series_equal(value.str.zfill(3), expected) + + value = Series(["-2", "+5"]) + expected = Series(["-0002", "+0005"]) + tm.assert_series_equal(value.str.zfill(5), expected) + + +def test_zfill_with_non_integer_argument(): + value = Series(["-2", "+5"]) + wid = "a" + msg = f"width must be of integer type, not {type(wid).__name__}" + with pytest.raises(TypeError, match=msg): + value.str.zfill(wid) + + +def test_zfill_with_leading_sign(): + value = Series(["-cat", "-1", "+dog"]) + expected = 
Series(["-0cat", "-0001", "+0dog"]) + tm.assert_series_equal(value.str.zfill(5), expected) + + +def test_get_with_dict_label(): + # GH47911 + s = Series( + [ + {"name": "Hello", "value": "World"}, + {"name": "Goodbye", "value": "Planet"}, + {"value": "Sea"}, + ] + ) + result = s.str.get("name") + expected = Series(["Hello", "Goodbye", None], dtype=object) + tm.assert_series_equal(result, expected) + result = s.str.get("value") + expected = Series(["World", "Planet", "Sea"], dtype=object) + tm.assert_series_equal(result, expected) + + +def test_series_str_decode(): + # GH 22613 + result = Series([b"x", b"y"]).str.decode(encoding="UTF-8", errors="strict") + expected = Series(["x", "y"], dtype="object") + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..539ce36ee62fc337d5f3e153e38b408e51945294 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f99968132093119edbbe64cef4daee7cf0851c56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aef163f9f90bc998f236d14815524848a96dd8b3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa830b57186eaeb4393227006f71dad19c0f7a93 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..791099e0edadca7ebfe362449b56c7ce34a20c1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_fields.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_fields.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0557bce9ecd52a54250d649e4ca326ac402e76ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_fields.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_libfrequencies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_libfrequencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38effdca81702758ac72eec421fda1f75ef46eea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_libfrequencies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_liboffsets.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_liboffsets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11452d080b81880fef50e5e5a1a0c1183bbb674e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_liboffsets.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_np_datetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_np_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e2345f96becb4d6072f0adc900101a82740dcc6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_np_datetime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_npy_units.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_npy_units.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..027ffca78b5459ead2663fc188f0989111fee232 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_npy_units.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parse_iso8601.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parse_iso8601.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..391d1dc2cb7038bf691176b0ead7d7ae3d15e856 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parse_iso8601.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parsing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d68febf3ac074047e7a7b9033851d0eb971277ff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_parsing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_period.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_period.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a43ac8b0f408d44146bb24d142376f0473a7609 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_period.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_resolution.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_resolution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8464ff8bd858d38221bfe58a8c4d8ef0494cfe1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_resolution.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_strptime.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_strptime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29cbcade14118c7f221704c3534307f7feb8d342 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_strptime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timedeltas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timedeltas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd816461c317ff5a9e85beecf7a29d00397b9f58 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timedeltas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timezones.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timezones.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a8b22db60bb0372b2cadd41fa13d1537465eed8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_timezones.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_to_offset.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_to_offset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fc30bc38c00ae208d73307889fc0480766a8f95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_to_offset.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_tzconversion.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_tzconversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9a1ce054a72cc39d39b589cf3b493cfe30dc764 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/__pycache__/test_tzconversion.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_api.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..42d055326c2a54f5fcd6ba2257201eaec2a3122b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_api.py @@ -0,0 +1,65 @@ +"""Tests that the tslibs API is locked down""" + +from pandas._libs import tslibs + + +def test_namespace(): + submodules = [ + "base", + "ccalendar", + "conversion", + "dtypes", + "fields", + "nattype", + "np_datetime", + "offsets", + "parsing", + "period", + "strptime", + "vectorized", + "timedeltas", + "timestamps", + "timezones", + "tzconversion", + ] + + api = [ + "BaseOffset", + "NaT", + "NaTType", + "iNaT", + "nat_strings", + "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", + "Period", + "IncompatibleFrequency", + "Resolution", + "Tick", + "Timedelta", + "dt64arr_to_periodarr", + "Timestamp", + "is_date_array_normalized", + "ints_to_pydatetime", + "normalize_i8_timestamps", + "get_resolution", + "delta_to_nanoseconds", + "ints_to_pytimedelta", + "localize_pydatetime", + "tz_convert_from_utc", + "tz_convert_from_utc_single", + "to_offset", + "tz_compare", + "is_unitless", + "astype_overflowsafe", + "get_unit_from_dtype", + "periods_per_day", + "periods_per_second", + "guess_datetime_format", + "add_overflowsafe", + "get_supported_dtype", + "is_supported_dtype", + ] + + expected = set(submodules + api) + names = [x for x in dir(tslibs) if not x.startswith("__")] + assert set(names) == expected diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_array_to_datetime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_array_to_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..82175c67764f84355dfb6d10a0477e9e0344e296 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_array_to_datetime.py @@ -0,0 +1,337 @@ +from datetime import ( + date, + datetime, + timedelta, + timezone, +) + +from dateutil.tz.tz import tzoffset +import numpy as np +import pytest + +from pandas._libs import ( + NaT, + iNaT, + tslib, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime + +from pandas import Timestamp +import pandas._testing as tm + +creso_infer = NpyDatetimeUnit.NPY_FR_GENERIC.value + + +class TestArrayToDatetimeResolutionInference: + # TODO: tests that include tzs, ints + + def test_infer_all_nat(self): + arr = np.array([NaT, np.nan], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + assert result.dtype == "M8[s]" + + def test_infer_homogeoneous_datetimes(self): + dt = datetime(2023, 10, 27, 18, 3, 5, 678000) + arr = np.array([dt, dt, dt], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([dt, dt, dt], dtype="M8[us]") + tm.assert_numpy_array_equal(result, expected) + + def test_infer_homogeoneous_date_objects(self): + dt = datetime(2023, 10, 27, 18, 3, 5, 678000) + dt2 = dt.date() + arr = np.array([None, dt2, dt2, dt2], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([np.datetime64("NaT"), dt2, dt2, dt2], dtype="M8[s]") + tm.assert_numpy_array_equal(result, expected) + + def test_infer_homogeoneous_dt64(self): + dt = datetime(2023, 10, 27, 18, 3, 5, 678000) + dt64 = np.datetime64(dt, "ms") + arr = np.array([None, dt64, 
dt64, dt64], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([np.datetime64("NaT"), dt64, dt64, dt64], dtype="M8[ms]") + tm.assert_numpy_array_equal(result, expected) + + def test_infer_homogeoneous_timestamps(self): + dt = datetime(2023, 10, 27, 18, 3, 5, 678000) + ts = Timestamp(dt).as_unit("ns") + arr = np.array([None, ts, ts, ts], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([np.datetime64("NaT")] + [ts.asm8] * 3, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + def test_infer_homogeoneous_datetimes_strings(self): + item = "2023-10-27 18:03:05.678000" + arr = np.array([None, item, item, item], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([np.datetime64("NaT"), item, item, item], dtype="M8[us]") + tm.assert_numpy_array_equal(result, expected) + + def test_infer_heterogeneous(self): + dtstr = "2023-10-27 18:03:05.678000" + + arr = np.array([dtstr, dtstr[:-3], dtstr[:-7], None], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array(arr, dtype="M8[us]") + tm.assert_numpy_array_equal(result, expected) + + result, tz = tslib.array_to_datetime(arr[::-1], creso=creso_infer) + assert tz is None + tm.assert_numpy_array_equal(result, expected[::-1]) + + @pytest.mark.parametrize( + "item", [float("nan"), NaT.value, float(NaT.value), "NaT", ""] + ) + def test_infer_with_nat_int_float_str(self, item): + # floats/ints get inferred to nanos *unless* they are NaN/iNaT, + # similar NaT string gets treated like NaT scalar (ignored for resolution) + dt = datetime(2023, 11, 15, 15, 5, 6) + + arr = np.array([dt, item], dtype=object) + result, tz = tslib.array_to_datetime(arr, creso=creso_infer) + assert tz is None + expected = np.array([dt, np.datetime64("NaT")], dtype="M8[us]") + 
tm.assert_numpy_array_equal(result, expected) + + result2, tz2 = tslib.array_to_datetime(arr[::-1], creso=creso_infer) + assert tz2 is None + tm.assert_numpy_array_equal(result2, expected[::-1]) + + +class TestArrayToDatetimeWithTZResolutionInference: + def test_array_to_datetime_with_tz_resolution(self): + tz = tzoffset("custom", 3600) + vals = np.array(["2016-01-01 02:03:04.567", NaT], dtype=object) + res = tslib.array_to_datetime_with_tz(vals, tz, False, False, creso_infer) + assert res.dtype == "M8[ms]" + + vals2 = np.array([datetime(2016, 1, 1, 2, 3, 4), NaT], dtype=object) + res2 = tslib.array_to_datetime_with_tz(vals2, tz, False, False, creso_infer) + assert res2.dtype == "M8[us]" + + vals3 = np.array([NaT, np.datetime64(12345, "s")], dtype=object) + res3 = tslib.array_to_datetime_with_tz(vals3, tz, False, False, creso_infer) + assert res3.dtype == "M8[s]" + + def test_array_to_datetime_with_tz_resolution_all_nat(self): + tz = tzoffset("custom", 3600) + vals = np.array(["NaT"], dtype=object) + res = tslib.array_to_datetime_with_tz(vals, tz, False, False, creso_infer) + assert res.dtype == "M8[s]" + + vals2 = np.array([NaT, NaT], dtype=object) + res2 = tslib.array_to_datetime_with_tz(vals2, tz, False, False, creso_infer) + assert res2.dtype == "M8[s]" + + +@pytest.mark.parametrize( + "data,expected", + [ + ( + ["01-01-2013", "01-02-2013"], + [ + "2013-01-01T00:00:00.000000000", + "2013-01-02T00:00:00.000000000", + ], + ), + ( + ["Mon Sep 16 2013", "Tue Sep 17 2013"], + [ + "2013-09-16T00:00:00.000000000", + "2013-09-17T00:00:00.000000000", + ], + ), + ], +) +def test_parsing_valid_dates(data, expected): + arr = np.array(data, dtype=object) + result, _ = tslib.array_to_datetime(arr) + + expected = np.array(expected, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "dt_string, expected_tz", + [ + ["01-01-2013 08:00:00+08:00", 480], + ["2013-01-01T08:00:00.000000000+0800", 480], + 
["2012-12-31T16:00:00.000000000-0800", -480], + ["12-31-2012 23:00:00-01:00", -60], + ], +) +def test_parsing_timezone_offsets(dt_string, expected_tz): + # All of these datetime strings with offsets are equivalent + # to the same datetime after the timezone offset is added. + arr = np.array(["01-01-2013 00:00:00"], dtype=object) + expected, _ = tslib.array_to_datetime(arr) + + arr = np.array([dt_string], dtype=object) + result, result_tz = tslib.array_to_datetime(arr) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz == timezone(timedelta(minutes=expected_tz)) + + +def test_parsing_non_iso_timezone_offset(): + dt_string = "01-01-2013T00:00:00.000000000+0000" + arr = np.array([dt_string], dtype=object) + + with tm.assert_produces_warning(None): + # GH#50949 should not get tzlocal-deprecation warning here + result, result_tz = tslib.array_to_datetime(arr) + expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")]) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz is timezone.utc + + +def test_parsing_different_timezone_offsets(): + # see gh-17697 + data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"] + data = np.array(data, dtype=object) + + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result, result_tz = tslib.array_to_datetime(data) + expected = np.array( + [ + datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)), + datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 23400)), + ], + dtype=object, + ) + + tm.assert_numpy_array_equal(result, expected) + assert result_tz is None + + +@pytest.mark.parametrize( + "data", [["-352.737091", "183.575577"], ["1", "2", "3", "4", "5"]] +) +def test_number_looking_strings_not_into_datetime(data): + # see gh-4601 + # + # These strings don't look like datetimes, so + # they shouldn't be attempted to be converted. 
+ arr = np.array(data, dtype=object) + result, _ = tslib.array_to_datetime(arr, errors="ignore") + + tm.assert_numpy_array_equal(result, arr) + + +@pytest.mark.parametrize( + "invalid_date", + [ + date(1000, 1, 1), + datetime(1000, 1, 1), + "1000-01-01", + "Jan 1, 1000", + np.datetime64("1000-01-01"), + ], +) +@pytest.mark.parametrize("errors", ["coerce", "raise"]) +def test_coerce_outside_ns_bounds(invalid_date, errors): + arr = np.array([invalid_date], dtype="object") + kwargs = {"values": arr, "errors": errors} + + if errors == "raise": + msg = "^Out of bounds nanosecond timestamp: .*, at position 0$" + + with pytest.raises(OutOfBoundsDatetime, match=msg): + tslib.array_to_datetime(**kwargs) + else: # coerce. + result, _ = tslib.array_to_datetime(**kwargs) + expected = np.array([iNaT], dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) + + +def test_coerce_outside_ns_bounds_one_valid(): + arr = np.array(["1/1/1000", "1/1/2000"], dtype=object) + result, _ = tslib.array_to_datetime(arr, errors="coerce") + + expected = [iNaT, "2000-01-01T00:00:00.000000000"] + expected = np.array(expected, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("errors", ["ignore", "coerce"]) +def test_coerce_of_invalid_datetimes(errors): + arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object) + kwargs = {"values": arr, "errors": errors} + + if errors == "ignore": + # Without coercing, the presence of any invalid + # dates prevents any values from being converted. + result, _ = tslib.array_to_datetime(**kwargs) + tm.assert_numpy_array_equal(result, arr) + else: # coerce. 
+ # With coercing, the invalid dates becomes iNaT + result, _ = tslib.array_to_datetime(arr, errors="coerce") + expected = ["2013-01-01T00:00:00.000000000", iNaT, iNaT] + + tm.assert_numpy_array_equal(result, np.array(expected, dtype="M8[ns]")) + + +def test_to_datetime_barely_out_of_bounds(): + # see gh-19382, gh-19529 + # + # Close enough to bounds that dropping nanos + # would result in an in-bounds datetime. + arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object) + msg = "^Out of bounds nanosecond timestamp: 2262-04-11 23:47:16, at position 0$" + + with pytest.raises(tslib.OutOfBoundsDatetime, match=msg): + tslib.array_to_datetime(arr) + + +@pytest.mark.parametrize( + "timestamp", + [ + # Close enough to bounds that scaling micros to nanos overflows + # but adding nanos would result in an in-bounds datetime. + "1677-09-21T00:12:43.145224193", + "1677-09-21T00:12:43.145224999", + # this always worked + "1677-09-21T00:12:43.145225000", + ], +) +def test_to_datetime_barely_inside_bounds(timestamp): + # see gh-57150 + result, _ = tslib.array_to_datetime(np.array([timestamp], dtype=object)) + tm.assert_numpy_array_equal(result, np.array([timestamp], dtype="M8[ns]")) + + +class SubDatetime(datetime): + pass + + +@pytest.mark.parametrize( + "data,expected", + [ + ([SubDatetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]), + ([datetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]), + ([Timestamp(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]), + ], +) +def test_datetime_subclass(data, expected): + # GH 25851 + # ensure that subclassed datetime works with + # array_to_datetime + + arr = np.array(data, dtype=object) + result, _ = tslib.array_to_datetime(arr) + + expected = np.array(expected, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_ccalendar.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_ccalendar.py new file mode 100644 index 
0000000000000000000000000000000000000000..8dd1bd47e4728d1b35e84b14f29e0a255178ec9b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_ccalendar.py @@ -0,0 +1,63 @@ +from datetime import ( + date, + datetime, +) + +from hypothesis import given +import numpy as np +import pytest + +from pandas._libs.tslibs import ccalendar + +from pandas._testing._hypothesis import DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ + + +@pytest.mark.parametrize( + "date_tuple,expected", + [ + ((2001, 3, 1), 60), + ((2004, 3, 1), 61), + ((1907, 12, 31), 365), # End-of-year, non-leap year. + ((2004, 12, 31), 366), # End-of-year, leap year. + ], +) +def test_get_day_of_year_numeric(date_tuple, expected): + assert ccalendar.get_day_of_year(*date_tuple) == expected + + +def test_get_day_of_year_dt(): + dt = datetime.fromordinal(1 + np.random.default_rng(2).integers(365 * 4000)) + result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day) + + expected = (dt - dt.replace(month=1, day=1)).days + 1 + assert result == expected + + +@pytest.mark.parametrize( + "input_date_tuple, expected_iso_tuple", + [ + [(2020, 1, 1), (2020, 1, 3)], + [(2019, 12, 31), (2020, 1, 2)], + [(2019, 12, 30), (2020, 1, 1)], + [(2009, 12, 31), (2009, 53, 4)], + [(2010, 1, 1), (2009, 53, 5)], + [(2010, 1, 3), (2009, 53, 7)], + [(2010, 1, 4), (2010, 1, 1)], + [(2006, 1, 1), (2005, 52, 7)], + [(2005, 12, 31), (2005, 52, 6)], + [(2008, 12, 28), (2008, 52, 7)], + [(2008, 12, 29), (2009, 1, 1)], + ], +) +def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple): + result = ccalendar.get_iso_calendar(*input_date_tuple) + expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() + assert result == expected_from_date_isocalendar + assert result == expected_iso_tuple + + +@given(DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ) +def test_isocalendar(dt): + expected = dt.isocalendar() + result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) + assert result == expected 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_conversion.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..9d7a5e906c3c3771a7d909f0ded9397e5cee0d64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_conversion.py @@ -0,0 +1,160 @@ +from datetime import datetime + +import numpy as np +import pytest +from pytz import UTC + +from pandas._libs.tslibs import ( + OutOfBoundsTimedelta, + astype_overflowsafe, + conversion, + iNaT, + timezones, + tz_convert_from_utc, + tzconversion, +) + +from pandas import ( + Timestamp, + date_range, +) +import pandas._testing as tm + + +def _compare_utc_to_local(tz_didx): + def f(x): + return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz) + + result = tz_convert_from_utc(tz_didx.asi8, tz_didx.tz) + expected = np.vectorize(f)(tz_didx.asi8) + + tm.assert_numpy_array_equal(result, expected) + + +def _compare_local_to_utc(tz_didx, naive_didx): + # Check that tz_localize behaves the same vectorized and pointwise. 
+ err1 = err2 = None + try: + result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz) + err1 = None + except Exception as err: + err1 = err + + try: + expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8 + except Exception as err: + err2 = err + + if err1 is not None: + assert type(err1) == type(err2) + else: + assert err2 is None + tm.assert_numpy_array_equal(result, expected) + + +def test_tz_localize_to_utc_copies(): + # GH#46460 + arr = np.arange(5, dtype="i8") + result = tz_convert_from_utc(arr, tz=UTC) + tm.assert_numpy_array_equal(result, arr) + assert not np.shares_memory(arr, result) + + result = tz_convert_from_utc(arr, tz=None) + tm.assert_numpy_array_equal(result, arr) + assert not np.shares_memory(arr, result) + + +def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture): + tz = tz_aware_fixture + tz_didx = date_range("2014-03-01", "2015-01-10", freq="h", tz=tz) + naive_didx = date_range("2014-03-01", "2015-01-10", freq="h") + + _compare_utc_to_local(tz_didx) + _compare_local_to_utc(tz_didx, naive_didx) + + +@pytest.mark.parametrize("freq", ["D", "YE"]) +def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): + tz = tz_aware_fixture + tz_didx = date_range("2018-01-01", "2020-01-01", freq=freq, tz=tz) + naive_didx = date_range("2018-01-01", "2020-01-01", freq=freq) + + _compare_utc_to_local(tz_didx) + _compare_local_to_utc(tz_didx, naive_didx) + + +@pytest.mark.parametrize( + "arr", + [ + pytest.param(np.array([], dtype=np.int64), id="empty"), + pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"), + ], +) +def test_tz_convert_corner(arr): + result = tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo")) + tm.assert_numpy_array_equal(result, arr) + + +def test_tz_convert_readonly(): + # GH#35530 + arr = np.array([0], dtype=np.int64) + arr.setflags(write=False) + result = tz_convert_from_utc(arr, UTC) + tm.assert_numpy_array_equal(result, arr) + + +@pytest.mark.parametrize("copy", 
[True, False]) +@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"]) +def test_length_zero_copy(dtype, copy): + arr = np.array([], dtype=dtype) + result = astype_overflowsafe(arr, copy=copy, dtype=np.dtype("M8[ns]")) + if copy: + assert not np.shares_memory(result, arr) + elif arr.dtype == result.dtype: + assert result is arr + else: + assert not np.shares_memory(result, arr) + + +def test_ensure_datetime64ns_bigendian(): + # GH#29684 + arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]") + result = astype_overflowsafe(arr, dtype=np.dtype("M8[ns]")) + + expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +def test_ensure_timedelta64ns_overflows(): + arr = np.arange(10).astype("m8[Y]") * 100 + msg = r"Cannot convert 300 years to timedelta64\[ns\] without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + astype_overflowsafe(arr, dtype=np.dtype("m8[ns]")) + + +class SubDatetime(datetime): + pass + + +@pytest.mark.parametrize( + "dt, expected", + [ + pytest.param( + Timestamp("2000-01-01"), Timestamp("2000-01-01", tz=UTC), id="timestamp" + ), + pytest.param( + datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=UTC), id="datetime" + ), + pytest.param( + SubDatetime(2000, 1, 1), + SubDatetime(2000, 1, 1, tzinfo=UTC), + id="subclassed_datetime", + ), + ], +) +def test_localize_pydatetime_dt_types(dt, expected): + # GH 25851 + # ensure that subclassed datetime works with + # localize_pydatetime + result = conversion.localize_pydatetime(dt, UTC) + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_fields.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_fields.py new file mode 100644 index 0000000000000000000000000000000000000000..da67c093b8f4dbaffba9e02f395bb830de33b489 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_fields.py @@ -0,0 +1,40 @@ +import numpy as np +import pytest 
+ +from pandas._libs.tslibs import fields + +import pandas._testing as tm + + +@pytest.fixture +def dtindex(): + dtindex = np.arange(5, dtype=np.int64) * 10**9 * 3600 * 24 * 32 + dtindex.flags.writeable = False + return dtindex + + +def test_get_date_name_field_readonly(dtindex): + # https://github.com/vaexio/vaex/issues/357 + # fields functions shouldn't raise when we pass read-only data + result = fields.get_date_name_field(dtindex, "month_name") + expected = np.array(["January", "February", "March", "April", "May"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_get_date_field_readonly(dtindex): + result = fields.get_date_field(dtindex, "Y") + expected = np.array([1970, 1970, 1970, 1970, 1970], dtype=np.int32) + tm.assert_numpy_array_equal(result, expected) + + +def test_get_start_end_field_readonly(dtindex): + result = fields.get_start_end_field(dtindex, "is_month_start", None) + expected = np.array([True, False, False, False, False], dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + +def test_get_timedelta_field_readonly(dtindex): + # treat dtindex as timedeltas for this next one + result = fields.get_timedelta_field(dtindex, "seconds") + expected = np.array([0] * 5, dtype=np.int32) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_libfrequencies.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_libfrequencies.py new file mode 100644 index 0000000000000000000000000000000000000000..effd3b4b8b4e5fa113f1b20506997efc54d3c9d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_libfrequencies.py @@ -0,0 +1,27 @@ +import pytest + +from pandas._libs.tslibs.parsing import get_rule_month + +from pandas.tseries import offsets + + +@pytest.mark.parametrize( + "obj,expected", + [ + ("W", "DEC"), + (offsets.Week().freqstr, "DEC"), + ("D", "DEC"), + (offsets.Day().freqstr, "DEC"), + ("Q", "DEC"), + 
(offsets.QuarterEnd(startingMonth=12).freqstr, "DEC"), + ("Q-JAN", "JAN"), + (offsets.QuarterEnd(startingMonth=1).freqstr, "JAN"), + ("Y-DEC", "DEC"), + (offsets.YearEnd().freqstr, "DEC"), + ("Y-MAY", "MAY"), + (offsets.YearEnd(month=5).freqstr, "MAY"), + ], +) +def test_get_rule_month(obj, expected): + result = get_rule_month(obj) + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_liboffsets.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_liboffsets.py new file mode 100644 index 0000000000000000000000000000000000000000..c189a431146a7172862586ea3a015ad4f2676cf2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_liboffsets.py @@ -0,0 +1,173 @@ +""" +Tests for helper functions in the cython tslibs.offsets +""" +from datetime import datetime + +import pytest + +from pandas._libs.tslibs.ccalendar import ( + get_firstbday, + get_lastbday, +) +import pandas._libs.tslibs.offsets as liboffsets +from pandas._libs.tslibs.offsets import roll_qtrday + +from pandas import Timestamp + + +@pytest.fixture(params=["start", "end", "business_start", "business_end"]) +def day_opt(request): + return request.param + + +@pytest.mark.parametrize( + "dt,exp_week_day,exp_last_day", + [ + (datetime(2017, 11, 30), 3, 30), # Business day. + (datetime(1993, 10, 31), 6, 29), # Non-business day. + ], +) +def test_get_last_bday(dt, exp_week_day, exp_last_day): + assert dt.weekday() == exp_week_day + assert get_lastbday(dt.year, dt.month) == exp_last_day + + +@pytest.mark.parametrize( + "dt,exp_week_day,exp_first_day", + [ + (datetime(2017, 4, 1), 5, 3), # Non-weekday. + (datetime(1993, 10, 1), 4, 1), # Business day. 
+ ], +) +def test_get_first_bday(dt, exp_week_day, exp_first_day): + assert dt.weekday() == exp_week_day + assert get_firstbday(dt.year, dt.month) == exp_first_day + + +@pytest.mark.parametrize( + "months,day_opt,expected", + [ + (0, 15, datetime(2017, 11, 15)), + (0, None, datetime(2017, 11, 30)), + (1, "start", datetime(2017, 12, 1)), + (-145, "end", datetime(2005, 10, 31)), + (0, "business_end", datetime(2017, 11, 30)), + (0, "business_start", datetime(2017, 11, 1)), + ], +) +def test_shift_month_dt(months, day_opt, expected): + dt = datetime(2017, 11, 30) + assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected + + +@pytest.mark.parametrize( + "months,day_opt,expected", + [ + (1, "start", Timestamp("1929-06-01")), + (-3, "end", Timestamp("1929-02-28")), + (25, None, Timestamp("1931-06-5")), + (-1, 31, Timestamp("1929-04-30")), + ], +) +def test_shift_month_ts(months, day_opt, expected): + ts = Timestamp("1929-05-05") + assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected + + +def test_shift_month_error(): + dt = datetime(2017, 11, 15) + day_opt = "this should raise" + + with pytest.raises(ValueError, match=day_opt): + liboffsets.shift_month(dt, 3, day_opt=day_opt) + + +@pytest.mark.parametrize( + "other,expected", + [ + # Before March 1. + (datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}), + # After March 1. + (Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}), + ], +) +@pytest.mark.parametrize("n", [2, -7, 0]) +def test_roll_qtrday_year(other, expected, n): + month = 3 + day_opt = "start" # `other` will be compared to March 1. + + assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n] + + +@pytest.mark.parametrize( + "other,expected", + [ + # Before June 30. + (datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}), + # After June 30. 
+ (Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}), + ], +) +@pytest.mark.parametrize("n", [5, -7, 0]) +def test_roll_qtrday_year2(other, expected, n): + month = 6 + day_opt = "end" # `other` will be compared to June 30. + + assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n] + + +def test_get_day_of_month_error(): + # get_day_of_month is not directly exposed. + # We test it via roll_qtrday. + dt = datetime(2017, 11, 15) + day_opt = "foo" + + with pytest.raises(ValueError, match=day_opt): + # To hit the raising case we need month == dt.month and n > 0. + roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12) + + +@pytest.mark.parametrize( + "month", + [3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3) +) +@pytest.mark.parametrize("n", [4, -3]) +def test_roll_qtr_day_not_mod_unequal(day_opt, month, n): + expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}} + + other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday. + assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n] + + +@pytest.mark.parametrize( + "other,month,exp_dict", + [ + # Monday. + (datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}), + # Saturday. + ( + Timestamp(2072, 10, 1, 6, 17, 18), + 4, + {2: {"end": 1, "business_end": 1, "business_start": 1}}, + ), + # First business day. + ( + Timestamp(2072, 10, 3, 6, 17, 18), + 4, + {2: {"end": 1, "business_end": 1}, -1: {"start": 0}}, + ), + ], +) +@pytest.mark.parametrize("n", [2, -1]) +def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt): + # All cases have (other.month % 3) == (month % 3). 
+ expected = exp_dict.get(n, {}).get(day_opt, n) + assert roll_qtrday(other, n, month, day_opt, modby=3) == expected + + +@pytest.mark.parametrize( + "n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})] +) +@pytest.mark.parametrize("compare", [29, 1, 31]) +def test_roll_convention(n, expected, compare): + assert liboffsets.roll_convention(29, n, compare) == expected[compare] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_np_datetime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_np_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..02edf1a09387766d71097ea0baedc2640cfb824b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_np_datetime.py @@ -0,0 +1,222 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas._libs.tslibs.np_datetime import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + astype_overflowsafe, + is_unitless, + py_get_unit_from_dtype, + py_td64_to_tdstruct, +) + +import pandas._testing as tm + + +def test_is_unitless(): + dtype = np.dtype("M8[ns]") + assert not is_unitless(dtype) + + dtype = np.dtype("datetime64") + assert is_unitless(dtype) + + dtype = np.dtype("m8[ns]") + assert not is_unitless(dtype) + + dtype = np.dtype("timedelta64") + assert is_unitless(dtype) + + msg = "dtype must be datetime64 or timedelta64" + with pytest.raises(ValueError, match=msg): + is_unitless(np.dtype(np.int64)) + + msg = "Argument 'dtype' has incorrect type" + with pytest.raises(TypeError, match=msg): + is_unitless("foo") + + +def test_get_unit_from_dtype(): + # datetime64 + assert py_get_unit_from_dtype(np.dtype("M8[Y]")) == NpyDatetimeUnit.NPY_FR_Y.value + assert py_get_unit_from_dtype(np.dtype("M8[M]")) == NpyDatetimeUnit.NPY_FR_M.value + assert py_get_unit_from_dtype(np.dtype("M8[W]")) == NpyDatetimeUnit.NPY_FR_W.value + # B has been deprecated and removed -> no 3 + 
assert py_get_unit_from_dtype(np.dtype("M8[D]")) == NpyDatetimeUnit.NPY_FR_D.value + assert py_get_unit_from_dtype(np.dtype("M8[h]")) == NpyDatetimeUnit.NPY_FR_h.value + assert py_get_unit_from_dtype(np.dtype("M8[m]")) == NpyDatetimeUnit.NPY_FR_m.value + assert py_get_unit_from_dtype(np.dtype("M8[s]")) == NpyDatetimeUnit.NPY_FR_s.value + assert py_get_unit_from_dtype(np.dtype("M8[ms]")) == NpyDatetimeUnit.NPY_FR_ms.value + assert py_get_unit_from_dtype(np.dtype("M8[us]")) == NpyDatetimeUnit.NPY_FR_us.value + assert py_get_unit_from_dtype(np.dtype("M8[ns]")) == NpyDatetimeUnit.NPY_FR_ns.value + assert py_get_unit_from_dtype(np.dtype("M8[ps]")) == NpyDatetimeUnit.NPY_FR_ps.value + assert py_get_unit_from_dtype(np.dtype("M8[fs]")) == NpyDatetimeUnit.NPY_FR_fs.value + assert py_get_unit_from_dtype(np.dtype("M8[as]")) == NpyDatetimeUnit.NPY_FR_as.value + + # timedelta64 + assert py_get_unit_from_dtype(np.dtype("m8[Y]")) == NpyDatetimeUnit.NPY_FR_Y.value + assert py_get_unit_from_dtype(np.dtype("m8[M]")) == NpyDatetimeUnit.NPY_FR_M.value + assert py_get_unit_from_dtype(np.dtype("m8[W]")) == NpyDatetimeUnit.NPY_FR_W.value + # B has been deprecated and removed -> no 3 + assert py_get_unit_from_dtype(np.dtype("m8[D]")) == NpyDatetimeUnit.NPY_FR_D.value + assert py_get_unit_from_dtype(np.dtype("m8[h]")) == NpyDatetimeUnit.NPY_FR_h.value + assert py_get_unit_from_dtype(np.dtype("m8[m]")) == NpyDatetimeUnit.NPY_FR_m.value + assert py_get_unit_from_dtype(np.dtype("m8[s]")) == NpyDatetimeUnit.NPY_FR_s.value + assert py_get_unit_from_dtype(np.dtype("m8[ms]")) == NpyDatetimeUnit.NPY_FR_ms.value + assert py_get_unit_from_dtype(np.dtype("m8[us]")) == NpyDatetimeUnit.NPY_FR_us.value + assert py_get_unit_from_dtype(np.dtype("m8[ns]")) == NpyDatetimeUnit.NPY_FR_ns.value + assert py_get_unit_from_dtype(np.dtype("m8[ps]")) == NpyDatetimeUnit.NPY_FR_ps.value + assert py_get_unit_from_dtype(np.dtype("m8[fs]")) == NpyDatetimeUnit.NPY_FR_fs.value + assert 
py_get_unit_from_dtype(np.dtype("m8[as]")) == NpyDatetimeUnit.NPY_FR_as.value + + +def test_td64_to_tdstruct(): + val = 12454636234 # arbitrary value + + res1 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_ns.value) + exp1 = { + "days": 0, + "hrs": 0, + "min": 0, + "sec": 12, + "ms": 454, + "us": 636, + "ns": 234, + "seconds": 12, + "microseconds": 454636, + "nanoseconds": 234, + } + assert res1 == exp1 + + res2 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_us.value) + exp2 = { + "days": 0, + "hrs": 3, + "min": 27, + "sec": 34, + "ms": 636, + "us": 234, + "ns": 0, + "seconds": 12454, + "microseconds": 636234, + "nanoseconds": 0, + } + assert res2 == exp2 + + res3 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_ms.value) + exp3 = { + "days": 144, + "hrs": 3, + "min": 37, + "sec": 16, + "ms": 234, + "us": 0, + "ns": 0, + "seconds": 13036, + "microseconds": 234000, + "nanoseconds": 0, + } + assert res3 == exp3 + + # Note this out of bounds for nanosecond Timedelta + res4 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_s.value) + exp4 = { + "days": 144150, + "hrs": 21, + "min": 10, + "sec": 34, + "ms": 0, + "us": 0, + "ns": 0, + "seconds": 76234, + "microseconds": 0, + "nanoseconds": 0, + } + assert res4 == exp4 + + +class TestAstypeOverflowSafe: + def test_pass_non_dt64_array(self): + # check that we raise, not segfault + arr = np.arange(5) + dtype = np.dtype("M8[ns]") + + msg = ( + "astype_overflowsafe values.dtype and dtype must be either " + "both-datetime64 or both-timedelta64" + ) + with pytest.raises(TypeError, match=msg): + astype_overflowsafe(arr, dtype, copy=True) + + with pytest.raises(TypeError, match=msg): + astype_overflowsafe(arr, dtype, copy=False) + + def test_pass_non_dt64_dtype(self): + # check that we raise, not segfault + arr = np.arange(5, dtype="i8").view("M8[D]") + dtype = np.dtype("m8[ns]") + + msg = ( + "astype_overflowsafe values.dtype and dtype must be either " + "both-datetime64 or both-timedelta64" + ) + with 
pytest.raises(TypeError, match=msg): + astype_overflowsafe(arr, dtype, copy=True) + + with pytest.raises(TypeError, match=msg): + astype_overflowsafe(arr, dtype, copy=False) + + def test_astype_overflowsafe_dt64(self): + dtype = np.dtype("M8[ns]") + + dt = np.datetime64("2262-04-05", "D") + arr = dt + np.arange(10, dtype="m8[D]") + + # arr.astype silently overflows, so this + wrong = arr.astype(dtype) + roundtrip = wrong.astype(arr.dtype) + assert not (wrong == roundtrip).all() + + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + astype_overflowsafe(arr, dtype) + + # But converting to microseconds is fine, and we match numpy's results. + dtype2 = np.dtype("M8[us]") + result = astype_overflowsafe(arr, dtype2) + expected = arr.astype(dtype2) + tm.assert_numpy_array_equal(result, expected) + + def test_astype_overflowsafe_td64(self): + dtype = np.dtype("m8[ns]") + + dt = np.datetime64("2262-04-05", "D") + arr = dt + np.arange(10, dtype="m8[D]") + arr = arr.view("m8[D]") + + # arr.astype silently overflows, so this + wrong = arr.astype(dtype) + roundtrip = wrong.astype(arr.dtype) + assert not (wrong == roundtrip).all() + + msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + astype_overflowsafe(arr, dtype) + + # But converting to microseconds is fine, and we match numpy's results. 
+ dtype2 = np.dtype("m8[us]") + result = astype_overflowsafe(arr, dtype2) + expected = arr.astype(dtype2) + tm.assert_numpy_array_equal(result, expected) + + def test_astype_overflowsafe_disallow_rounding(self): + arr = np.array([-1500, 1500], dtype="M8[ns]") + dtype = np.dtype("M8[us]") + + msg = "Cannot losslessly cast '-1500 ns' to us" + with pytest.raises(ValueError, match=msg): + astype_overflowsafe(arr, dtype, round_ok=False) + + result = astype_overflowsafe(arr, dtype, round_ok=True) + expected = arr.astype(dtype) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_npy_units.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_npy_units.py new file mode 100644 index 0000000000000000000000000000000000000000..6d05dc79fbb2cf52688547b672365802463ce6f2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_npy_units.py @@ -0,0 +1,27 @@ +import numpy as np + +from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit +from pandas._libs.tslibs.vectorized import is_date_array_normalized + +# a datetime64 ndarray which *is* normalized +day_arr = np.arange(10, dtype="i8").view("M8[D]") + + +class TestIsDateArrayNormalized: + def test_is_date_array_normalized_day(self): + arr = day_arr + abbrev = "D" + unit = abbrev_to_npy_unit(abbrev) + result = is_date_array_normalized(arr.view("i8"), None, unit) + assert result is True + + def test_is_date_array_normalized_seconds(self): + abbrev = "s" + arr = day_arr.astype(f"M8[{abbrev}]") + unit = abbrev_to_npy_unit(abbrev) + result = is_date_array_normalized(arr.view("i8"), None, unit) + assert result is True + + arr[0] += np.timedelta64(1, abbrev) + result2 = is_date_array_normalized(arr.view("i8"), None, unit) + assert result2 is False diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parse_iso8601.py 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parse_iso8601.py new file mode 100644 index 0000000000000000000000000000000000000000..1992faae2ea6a687f8bd74b4e1e10ba53bb9e901 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parse_iso8601.py @@ -0,0 +1,119 @@ +from datetime import datetime + +import pytest + +from pandas._libs import tslib + +from pandas import Timestamp + + +@pytest.mark.parametrize( + "date_str, exp", + [ + ("2011-01-02", datetime(2011, 1, 2)), + ("2011-1-2", datetime(2011, 1, 2)), + ("2011-01", datetime(2011, 1, 1)), + ("2011-1", datetime(2011, 1, 1)), + ("2011 01 02", datetime(2011, 1, 2)), + ("2011.01.02", datetime(2011, 1, 2)), + ("2011/01/02", datetime(2011, 1, 2)), + ("2011\\01\\02", datetime(2011, 1, 2)), + ("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)), + ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)), + ("2013-1-1 5:30:00+01:00", Timestamp(2013, 1, 1, 5, 30, tz="UTC+01:00")), + ], +) +def test_parsers_iso8601(date_str, exp): + # see gh-12060 + # + # Test only the ISO parser - flexibility to + # different separators and leading zero's. + actual = tslib._test_parse_iso8601(date_str) + assert actual == exp + + +@pytest.mark.parametrize( + "date_str", + [ + "2011-01/02", + "2011=11=11", + "201401", + "201111", + "200101", + # Mixed separated and unseparated. + "2005-0101", + "200501-01", + "20010101 12:3456", + "20010101 1234:56", + # HHMMSS must have two digits in + # each component if unseparated. 
+ "20010101 1", + "20010101 123", + "20010101 12345", + "20010101 12345Z", + ], +) +def test_parsers_iso8601_invalid(date_str): + msg = f'Error parsing datetime string "{date_str}"' + + with pytest.raises(ValueError, match=msg): + tslib._test_parse_iso8601(date_str) + + +def test_parsers_iso8601_invalid_offset_invalid(): + date_str = "2001-01-01 12-34-56" + msg = f'Timezone hours offset out of range in datetime string "{date_str}"' + + with pytest.raises(ValueError, match=msg): + tslib._test_parse_iso8601(date_str) + + +def test_parsers_iso8601_leading_space(): + # GH#25895 make sure isoparser doesn't overflow with long input + date_str, expected = ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)) + actual = tslib._test_parse_iso8601(" " * 200 + date_str) + assert actual == expected + + +@pytest.mark.parametrize( + "date_str, timespec, exp", + [ + ("2023-01-01 00:00:00", "auto", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00", "seconds", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00", "milliseconds", "2023-01-01T00:00:00.000"), + ("2023-01-01 00:00:00", "microseconds", "2023-01-01T00:00:00.000000"), + ("2023-01-01 00:00:00", "nanoseconds", "2023-01-01T00:00:00.000000000"), + ("2023-01-01 00:00:00.001", "auto", "2023-01-01T00:00:00.001000"), + ("2023-01-01 00:00:00.001", "seconds", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00.001", "milliseconds", "2023-01-01T00:00:00.001"), + ("2023-01-01 00:00:00.001", "microseconds", "2023-01-01T00:00:00.001000"), + ("2023-01-01 00:00:00.001", "nanoseconds", "2023-01-01T00:00:00.001000000"), + ("2023-01-01 00:00:00.000001", "auto", "2023-01-01T00:00:00.000001"), + ("2023-01-01 00:00:00.000001", "seconds", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00.000001", "milliseconds", "2023-01-01T00:00:00.000"), + ("2023-01-01 00:00:00.000001", "microseconds", "2023-01-01T00:00:00.000001"), + ("2023-01-01 00:00:00.000001", "nanoseconds", "2023-01-01T00:00:00.000001000"), + ("2023-01-01 00:00:00.000000001", "auto", 
"2023-01-01T00:00:00.000000001"), + ("2023-01-01 00:00:00.000000001", "seconds", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00.000000001", "milliseconds", "2023-01-01T00:00:00.000"), + ("2023-01-01 00:00:00.000000001", "microseconds", "2023-01-01T00:00:00.000000"), + ( + "2023-01-01 00:00:00.000000001", + "nanoseconds", + "2023-01-01T00:00:00.000000001", + ), + ("2023-01-01 00:00:00.000001001", "auto", "2023-01-01T00:00:00.000001001"), + ("2023-01-01 00:00:00.000001001", "seconds", "2023-01-01T00:00:00"), + ("2023-01-01 00:00:00.000001001", "milliseconds", "2023-01-01T00:00:00.000"), + ("2023-01-01 00:00:00.000001001", "microseconds", "2023-01-01T00:00:00.000001"), + ( + "2023-01-01 00:00:00.000001001", + "nanoseconds", + "2023-01-01T00:00:00.000001001", + ), + ], +) +def test_iso8601_formatter(date_str: str, timespec: str, exp: str): + # GH#53020 + ts = Timestamp(date_str) + assert ts.isoformat(timespec=timespec) == exp diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parsing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..d8f23156bd4d41f7d3cc1434a7b56b245837535d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_parsing.py @@ -0,0 +1,414 @@ +""" +Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx +""" +from datetime import datetime +import re + +from dateutil.parser import parse as du_parse +from dateutil.tz import tzlocal +from hypothesis import given +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + parsing, + strptime, +) +from pandas._libs.tslibs.parsing import parse_datetime_string_with_reso +from pandas.compat import ( + ISMUSL, + is_platform_windows, +) +import pandas.util._test_decorators as td + +import pandas._testing as tm +from pandas._testing._hypothesis import DATETIME_NO_TZ + + +@pytest.mark.skipif( + is_platform_windows() or ISMUSL, + 
reason="TZ setting incorrect on Windows and MUSL Linux", +) +def test_parsing_tzlocal_deprecated(): + # GH#50791 + msg = ( + "Parsing 'EST' as tzlocal.*" + "Pass the 'tz' keyword or call tz_localize after construction instead" + ) + dtstr = "Jan 15 2004 03:00 EST" + + with tm.set_timezone("US/Eastern"): + with tm.assert_produces_warning(FutureWarning, match=msg): + res, _ = parse_datetime_string_with_reso(dtstr) + + assert isinstance(res.tzinfo, tzlocal) + + with tm.assert_produces_warning(FutureWarning, match=msg): + res = parsing.py_parse_datetime_string(dtstr) + assert isinstance(res.tzinfo, tzlocal) + + +def test_parse_datetime_string_with_reso(): + (parsed, reso) = parse_datetime_string_with_reso("4Q1984") + (parsed_lower, reso_lower) = parse_datetime_string_with_reso("4q1984") + + assert reso == reso_lower + assert parsed == parsed_lower + + +def test_parse_datetime_string_with_reso_nanosecond_reso(): + # GH#46811 + parsed, reso = parse_datetime_string_with_reso("2022-04-20 09:19:19.123456789") + assert reso == "nanosecond" + + +def test_parse_datetime_string_with_reso_invalid_type(): + # Raise on invalid input, don't just return it + msg = "Argument 'date_string' has incorrect type (expected str, got tuple)" + with pytest.raises(TypeError, match=re.escape(msg)): + parse_datetime_string_with_reso((4, 5)) + + +@pytest.mark.parametrize( + "dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")] +) +def test_parse_time_quarter_with_dash(dashed, normal): + # see gh-9688 + (parsed_dash, reso_dash) = parse_datetime_string_with_reso(dashed) + (parsed, reso) = parse_datetime_string_with_reso(normal) + + assert parsed_dash == parsed + assert reso_dash == reso + + +@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"]) +def test_parse_time_quarter_with_dash_error(dashed): + msg = f"Unknown datetime string format, unable to parse: {dashed}" + + with pytest.raises(parsing.DateParseError, match=msg): + parse_datetime_string_with_reso(dashed) + + 
+@pytest.mark.parametrize( + "date_string,expected", + [ + ("123.1234", False), + ("-50000", False), + ("999", False), + ("m", False), + ("T", False), + ("Mon Sep 16, 2013", True), + ("2012-01-01", True), + ("01/01/2012", True), + ("01012012", True), + ("0101", True), + ("1-1", True), + ], +) +def test_does_not_convert_mixed_integer(date_string, expected): + assert parsing._does_string_look_like_datetime(date_string) is expected + + +@pytest.mark.parametrize( + "date_str,kwargs,msg", + [ + ( + "2013Q5", + {}, + ( + "Incorrect quarterly string is given, " + "quarter must be between 1 and 4: 2013Q5" + ), + ), + # see gh-5418 + ( + "2013Q1", + {"freq": "INVLD-L-DEC-SAT"}, + ( + "Unable to retrieve month information " + "from given freq: INVLD-L-DEC-SAT" + ), + ), + ], +) +def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg): + with pytest.raises(parsing.DateParseError, match=msg): + parsing.parse_datetime_string_with_reso(date_str, **kwargs) + + +@pytest.mark.parametrize( + "date_str,freq,expected", + [ + ("2013Q2", None, datetime(2013, 4, 1)), + ("2013Q2", "Y-APR", datetime(2012, 8, 1)), + ("2013-Q2", "Y-DEC", datetime(2013, 4, 1)), + ], +) +def test_parsers_quarterly_with_freq(date_str, freq, expected): + result, _ = parsing.parse_datetime_string_with_reso(date_str, freq=freq) + assert result == expected + + +@pytest.mark.parametrize( + "date_str", ["2Q 2005", "2Q-200Y", "2Q-200", "22Q2005", "2Q200.", "6Q-20"] +) +def test_parsers_quarter_invalid(date_str): + if date_str == "6Q-20": + msg = ( + "Incorrect quarterly string is given, quarter " + f"must be between 1 and 4: {date_str}" + ) + else: + msg = f"Unknown datetime string format, unable to parse: {date_str}" + + with pytest.raises(ValueError, match=msg): + parsing.parse_datetime_string_with_reso(date_str) + + +@pytest.mark.parametrize( + "date_str,expected", + [("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))], +) +def test_parsers_month_freq(date_str, expected): + 
result, _ = parsing.parse_datetime_string_with_reso(date_str, freq="ME") + assert result == expected + + +@td.skip_if_not_us_locale +@pytest.mark.parametrize( + "string,fmt", + [ + ("20111230", "%Y%m%d"), + ("201112300000", "%Y%m%d%H%M"), + ("20111230000000", "%Y%m%d%H%M%S"), + ("20111230T00", "%Y%m%dT%H"), + ("20111230T0000", "%Y%m%dT%H%M"), + ("20111230T000000", "%Y%m%dT%H%M%S"), + ("2011-12-30", "%Y-%m-%d"), + ("2011", "%Y"), + ("2011-01", "%Y-%m"), + ("30-12-2011", "%d-%m-%Y"), + ("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"), + ("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"), + ("2011-12-30T00:00:00UTC", "%Y-%m-%dT%H:%M:%S%Z"), + ("2011-12-30T00:00:00Z", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+9", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+09", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+090", None), + ("2011-12-30T00:00:00+0900", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00-0900", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+09:00", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+09:000", None), + ("2011-12-30T00:00:00+9:0", "%Y-%m-%dT%H:%M:%S%z"), + ("2011-12-30T00:00:00+09:", None), + ("2011-12-30T00:00:00.000000UTC", "%Y-%m-%dT%H:%M:%S.%f%Z"), + ("2011-12-30T00:00:00.000000Z", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+9", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+09", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+090", None), + ("2011-12-30T00:00:00.000000+0900", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000-0900", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+09:00", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+09:000", None), + ("2011-12-30T00:00:00.000000+9:0", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2011-12-30T00:00:00.000000+09:", None), + ("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"), + ("Tue 24 Aug 2021 01:30:48", "%a %d %b %Y %H:%M:%S"), + ("Tuesday 24 Aug 2021 01:30:48", "%A %d %b %Y %H:%M:%S"), + ("Tue 24 Aug 2021 01:30:48 AM", "%a %d %b %Y %I:%M:%S 
%p"), + ("Tuesday 24 Aug 2021 01:30:48 AM", "%A %d %b %Y %I:%M:%S %p"), + ("27.03.2003 14:55:00.000", "%d.%m.%Y %H:%M:%S.%f"), # GH50317 + ], +) +def test_guess_datetime_format_with_parseable_formats(string, fmt): + with tm.maybe_produces_warning( + UserWarning, fmt is not None and re.search(r"%d.*%m", fmt) + ): + result = parsing.guess_datetime_format(string) + assert result == fmt + + +@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")]) +def test_guess_datetime_format_with_dayfirst(dayfirst, expected): + ambiguous_string = "01/01/2011" + result = parsing.guess_datetime_format(ambiguous_string, dayfirst=dayfirst) + assert result == expected + + +@td.skip_if_not_us_locale +@pytest.mark.parametrize( + "string,fmt", + [ + ("30/Dec/2011", "%d/%b/%Y"), + ("30/December/2011", "%d/%B/%Y"), + ("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"), + ], +) +def test_guess_datetime_format_with_locale_specific_formats(string, fmt): + result = parsing.guess_datetime_format(string) + assert result == fmt + + +@pytest.mark.parametrize( + "invalid_dt", + [ + "01/2013", + "12:00:00", + "1/1/1/1", + "this_is_not_a_datetime", + "51a", + "13/2019", + "202001", # YYYYMM isn't ISO8601 + "2020/01", # YYYY/MM isn't ISO8601 either + "87156549591102612381000001219H5", + ], +) +def test_guess_datetime_format_invalid_inputs(invalid_dt): + # A datetime string must include a year, month and a day for it to be + # guessable, in addition to being a string that looks like a datetime. + assert parsing.guess_datetime_format(invalid_dt) is None + + +@pytest.mark.parametrize("invalid_type_dt", [9, datetime(2011, 1, 1)]) +def test_guess_datetime_format_wrong_type_inputs(invalid_type_dt): + # A datetime string must include a year, month and a day for it to be + # guessable, in addition to being a string that looks like a datetime. 
+ with pytest.raises( + TypeError, + match=r"^Argument 'dt_str' has incorrect type \(expected str, got .*\)$", + ): + parsing.guess_datetime_format(invalid_type_dt) + + +@pytest.mark.parametrize( + "string,fmt,dayfirst,warning", + [ + ("2011-1-1", "%Y-%m-%d", False, None), + ("2011-1-1", "%Y-%d-%m", True, None), + ("1/1/2011", "%m/%d/%Y", False, None), + ("1/1/2011", "%d/%m/%Y", True, None), + ("30-1-2011", "%d-%m-%Y", False, UserWarning), + ("30-1-2011", "%d-%m-%Y", True, None), + ("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S", False, None), + ("2011-1-1 0:0:0", "%Y-%d-%m %H:%M:%S", True, None), + ("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S", False, None), + ("2011-1-3T00:00:0", "%Y-%d-%mT%H:%M:%S", True, None), + ("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S", False, None), + ("2011-1-1 00:00:00", "%Y-%d-%m %H:%M:%S", True, None), + ], +) +def test_guess_datetime_format_no_padding(string, fmt, dayfirst, warning): + # see gh-11142 + msg = ( + rf"Parsing dates in {fmt} format when dayfirst=False \(the default\) " + "was specified. " + "Pass `dayfirst=True` or specify a format to silence this warning." 
+ ) + with tm.assert_produces_warning(warning, match=msg): + result = parsing.guess_datetime_format(string, dayfirst=dayfirst) + assert result == fmt + + +def test_try_parse_dates(): + arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object) + result = parsing.try_parse_dates(arr, parser=lambda x: du_parse(x, dayfirst=True)) + + expected = np.array([du_parse(d, dayfirst=True) for d in arr]) + tm.assert_numpy_array_equal(result, expected) + + +def test_parse_datetime_string_with_reso_check_instance_type_raise_exception(): + # issue 20684 + msg = "Argument 'date_string' has incorrect type (expected str, got tuple)" + with pytest.raises(TypeError, match=re.escape(msg)): + parse_datetime_string_with_reso((1, 2, 3)) + + result = parse_datetime_string_with_reso("2019") + expected = (datetime(2019, 1, 1), "year") + assert result == expected + + +@pytest.mark.parametrize( + "fmt,expected", + [ + ("%Y %m %d %H:%M:%S", True), + ("%Y/%m/%d %H:%M:%S", True), + (r"%Y\%m\%d %H:%M:%S", True), + ("%Y-%m-%d %H:%M:%S", True), + ("%Y.%m.%d %H:%M:%S", True), + ("%Y%m%d %H:%M:%S", True), + ("%Y-%m-%dT%H:%M:%S", True), + ("%Y-%m-%dT%H:%M:%S%z", True), + ("%Y-%m-%dT%H:%M:%S%Z", False), + ("%Y-%m-%dT%H:%M:%S.%f", True), + ("%Y-%m-%dT%H:%M:%S.%f%z", True), + ("%Y-%m-%dT%H:%M:%S.%f%Z", False), + ("%Y%m%d", True), + ("%Y%m", False), + ("%Y", True), + ("%Y-%m-%d", True), + ("%Y-%m", True), + ], +) +def test_is_iso_format(fmt, expected): + # see gh-41047 + result = strptime._test_format_is_iso(fmt) + assert result == expected + + +@pytest.mark.parametrize( + "input", + [ + "2018-01-01T00:00:00.123456789", + "2018-01-01T00:00:00.123456", + "2018-01-01T00:00:00.123", + ], +) +def test_guess_datetime_format_f(input): + # https://github.com/pandas-dev/pandas/issues/49043 + result = parsing.guess_datetime_format(input) + expected = "%Y-%m-%dT%H:%M:%S.%f" + assert result == expected + + +def _helper_hypothesis_delimited_date(call, date_string, **kwargs): + msg, result = None, None + try: + 
result = call(date_string, **kwargs) + except ValueError as err: + msg = str(err) + return msg, result + + +@given(DATETIME_NO_TZ) +@pytest.mark.parametrize("delimiter", list(" -./")) +@pytest.mark.parametrize("dayfirst", [True, False]) +@pytest.mark.parametrize( + "date_format", + ["%d %m %Y", "%m %d %Y", "%m %Y", "%Y %m %d", "%y %m %d", "%Y%m%d", "%y%m%d"], +) +def test_hypothesis_delimited_date( + request, date_format, dayfirst, delimiter, test_datetime +): + if date_format == "%m %Y" and delimiter == ".": + request.applymarker( + pytest.mark.xfail( + reason="parse_datetime_string cannot reliably tell whether " + "e.g. %m.%Y is a float or a date" + ) + ) + date_string = test_datetime.strftime(date_format.replace(" ", delimiter)) + + except_out_dateutil, result = _helper_hypothesis_delimited_date( + parsing.py_parse_datetime_string, date_string, dayfirst=dayfirst + ) + except_in_dateutil, expected = _helper_hypothesis_delimited_date( + du_parse, + date_string, + default=datetime(1, 1, 1), + dayfirst=dayfirst, + yearfirst=False, + ) + + assert except_out_dateutil == except_in_dateutil + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_period.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_period.py new file mode 100644 index 0000000000000000000000000000000000000000..715e2d3da88dbc69fd6a376f21b7bba78a46ca9f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_period.py @@ -0,0 +1,123 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + iNaT, + to_offset, +) +from pandas._libs.tslibs.period import ( + extract_ordinals, + get_period_field_arr, + period_asfreq, + period_ordinal, +) + +import pandas._testing as tm + + +def get_freq_code(freqstr: str) -> int: + off = to_offset(freqstr, is_period=True) + # error: "BaseOffset" has no attribute "_period_dtype_code" + code = off._period_dtype_code # type: ignore[attr-defined] + return code 
+ + +@pytest.mark.parametrize( + "freq1,freq2,expected", + [ + ("D", "h", 24), + ("D", "min", 1440), + ("D", "s", 86400), + ("D", "ms", 86400000), + ("D", "us", 86400000000), + ("D", "ns", 86400000000000), + ("h", "min", 60), + ("h", "s", 3600), + ("h", "ms", 3600000), + ("h", "us", 3600000000), + ("h", "ns", 3600000000000), + ("min", "s", 60), + ("min", "ms", 60000), + ("min", "us", 60000000), + ("min", "ns", 60000000000), + ("s", "ms", 1000), + ("s", "us", 1000000), + ("s", "ns", 1000000000), + ("ms", "us", 1000), + ("ms", "ns", 1000000), + ("us", "ns", 1000), + ], +) +def test_intra_day_conversion_factors(freq1, freq2, expected): + assert ( + period_asfreq(1, get_freq_code(freq1), get_freq_code(freq2), False) == expected + ) + + +@pytest.mark.parametrize( + "freq,expected", [("Y", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)] +) +def test_period_ordinal_start_values(freq, expected): + # information for Jan. 1, 1970. + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq_code(freq)) == expected + + +@pytest.mark.parametrize( + "dt,expected", + [ + ((1970, 1, 4, 0, 0, 0, 0, 0), 1), + ((1970, 1, 5, 0, 0, 0, 0, 0), 2), + ((2013, 10, 6, 0, 0, 0, 0, 0), 2284), + ((2013, 10, 7, 0, 0, 0, 0, 0), 2285), + ], +) +def test_period_ordinal_week(dt, expected): + args = dt + (get_freq_code("W"),) + assert period_ordinal(*args) == expected + + +@pytest.mark.parametrize( + "day,expected", + [ + # Thursday (Oct. 3, 2013). + (3, 11415), + # Friday (Oct. 4, 2013). + (4, 11416), + # Saturday (Oct. 5, 2013). + (5, 11417), + # Sunday (Oct. 6, 2013). + (6, 11417), + # Monday (Oct. 7, 2013). + (7, 11417), + # Tuesday (Oct. 8, 2013). 
+ (8, 11418), + ], +) +def test_period_ordinal_business_day(day, expected): + # 5000 is PeriodDtypeCode for BusinessDay + args = (2013, 10, day, 0, 0, 0, 0, 0, 5000) + assert period_ordinal(*args) == expected + + +class TestExtractOrdinals: + def test_extract_ordinals_raises(self): + # with non-object, make sure we raise TypeError, not segfault + arr = np.arange(5) + freq = to_offset("D") + with pytest.raises(TypeError, match="values must be object-dtype"): + extract_ordinals(arr, freq) + + def test_extract_ordinals_2d(self): + freq = to_offset("D") + arr = np.empty(10, dtype=object) + arr[:] = iNaT + + res = extract_ordinals(arr, freq) + res2 = extract_ordinals(arr.reshape(5, 2), freq) + tm.assert_numpy_array_equal(res, res2.reshape(-1)) + + +def test_get_period_field_array_raises_on_out_of_range(): + msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'" + with pytest.raises(ValueError, match=msg): + get_period_field_arr(-1, np.empty(1), 0) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_resolution.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_resolution.py new file mode 100644 index 0000000000000000000000000000000000000000..690962f1daa5eebd047d11297914eb36b494e0dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_resolution.py @@ -0,0 +1,57 @@ +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import ( + Resolution, + get_resolution, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit + +import pandas._testing as tm + + +def test_get_resolution_nano(): + # don't return the fallback RESO_DAY + arr = np.array([1], dtype=np.int64) + res = get_resolution(arr) + assert res == Resolution.RESO_NS + + +def test_get_resolution_non_nano_data(): + arr = np.array([1], dtype=np.int64) + res = get_resolution(arr, None, NpyDatetimeUnit.NPY_FR_us.value) + assert res == Resolution.RESO_US + + res = get_resolution(arr, pytz.UTC, 
NpyDatetimeUnit.NPY_FR_us.value) + assert res == Resolution.RESO_US + + +@pytest.mark.parametrize( + "freqstr,expected", + [ + ("Y", "year"), + ("Q", "quarter"), + ("M", "month"), + ("D", "day"), + ("h", "hour"), + ("min", "minute"), + ("s", "second"), + ("ms", "millisecond"), + ("us", "microsecond"), + ("ns", "nanosecond"), + ], +) +def test_get_attrname_from_abbrev(freqstr, expected): + reso = Resolution.get_reso_from_freqstr(freqstr) + assert reso.attr_abbrev == freqstr + assert reso.attrname == expected + + +@pytest.mark.parametrize("freq", ["A", "H", "T", "S", "L", "U", "N"]) +def test_units_A_H_T_S_L_U_N_deprecated_from_attrname_to_abbrevs(freq): + # GH#52536 + msg = f"'{freq}' is deprecated and will be removed in a future version." + + with tm.assert_produces_warning(FutureWarning, match=msg): + Resolution.get_reso_from_freqstr(freq) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_strptime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_strptime.py new file mode 100644 index 0000000000000000000000000000000000000000..d726006b03f6d43cb94a91518daabb2b29b757e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_strptime.py @@ -0,0 +1,110 @@ +from datetime import ( + datetime, + timezone, +) + +import numpy as np +import pytest + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas._libs.tslibs.strptime import array_strptime + +from pandas import ( + NaT, + Timestamp, +) +import pandas._testing as tm + +creso_infer = NpyDatetimeUnit.NPY_FR_GENERIC.value + + +class TestArrayStrptimeResolutionInference: + def test_array_strptime_resolution_all_nat(self): + arr = np.array([NaT, np.nan], dtype=object) + + fmt = "%Y-%m-%d %H:%M:%S" + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + assert res.dtype == "M8[s]" + + res, _ = array_strptime(arr, fmt=fmt, utc=True, creso=creso_infer) + assert res.dtype == "M8[s]" + + @pytest.mark.parametrize("tz", 
[None, timezone.utc]) + def test_array_strptime_resolution_inference_homogeneous_strings(self, tz): + dt = datetime(2016, 1, 2, 3, 4, 5, 678900, tzinfo=tz) + + fmt = "%Y-%m-%d %H:%M:%S" + dtstr = dt.strftime(fmt) + arr = np.array([dtstr] * 3, dtype=object) + expected = np.array([dt.replace(tzinfo=None)] * 3, dtype="M8[s]") + + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + fmt = "%Y-%m-%d %H:%M:%S.%f" + dtstr = dt.strftime(fmt) + arr = np.array([dtstr] * 3, dtype=object) + expected = np.array([dt.replace(tzinfo=None)] * 3, dtype="M8[us]") + + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + fmt = "ISO8601" + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + @pytest.mark.parametrize("tz", [None, timezone.utc]) + def test_array_strptime_resolution_mixed(self, tz): + dt = datetime(2016, 1, 2, 3, 4, 5, 678900, tzinfo=tz) + + ts = Timestamp(dt).as_unit("ns") + + arr = np.array([dt, ts], dtype=object) + expected = np.array( + [Timestamp(dt).as_unit("ns").asm8, ts.asm8], + dtype="M8[ns]", + ) + + fmt = "%Y-%m-%d %H:%M:%S" + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + fmt = "ISO8601" + res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + def test_array_strptime_resolution_todaynow(self): + # specifically case where today/now is the *first* item + vals = np.array(["today", np.datetime64("2017-01-01", "us")], dtype=object) + + now = Timestamp("now").asm8 + res, _ = array_strptime(vals, fmt="%Y-%m-%d", utc=False, creso=creso_infer) + res2, _ = array_strptime( + vals[::-1], fmt="%Y-%m-%d", utc=False, creso=creso_infer + ) + + # 1s is an arbitrary cutoff for call overhead; in local testing the + # actual difference is about 250us + tolerance 
= np.timedelta64(1, "s") + + assert res.dtype == "M8[us]" + assert abs(res[0] - now) < tolerance + assert res[1] == vals[1] + + assert res2.dtype == "M8[us]" + assert abs(res2[1] - now) < tolerance * 2 + assert res2[0] == vals[1] + + def test_array_strptime_str_outside_nano_range(self): + vals = np.array(["2401-09-15"], dtype=object) + expected = np.array(["2401-09-15"], dtype="M8[s]") + fmt = "ISO8601" + res, _ = array_strptime(vals, fmt=fmt, creso=creso_infer) + tm.assert_numpy_array_equal(res, expected) + + # non-iso -> different path + vals2 = np.array(["Sep 15, 2401"], dtype=object) + expected2 = np.array(["2401-09-15"], dtype="M8[s]") + fmt2 = "%b %d, %Y" + res2, _ = array_strptime(vals2, fmt=fmt2, creso=creso_infer) + tm.assert_numpy_array_equal(res2, expected2) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timedeltas.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timedeltas.py new file mode 100644 index 0000000000000000000000000000000000000000..4784a6d0d600dcc77e359fb3d7d56301f78270d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timedeltas.py @@ -0,0 +1,149 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs.timedeltas import ( + array_to_timedelta64, + delta_to_nanoseconds, + ints_to_pytimedelta, +) + +from pandas import ( + Timedelta, + offsets, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "obj,expected", + [ + (np.timedelta64(14, "D"), 14 * 24 * 3600 * 1e9), + (Timedelta(minutes=-7), -7 * 60 * 1e9), + (Timedelta(minutes=-7).to_pytimedelta(), -7 * 60 * 1e9), + (Timedelta(seconds=1234e-9), 1234), # GH43764, GH40946 + ( + Timedelta(seconds=1e-9, milliseconds=1e-5, microseconds=1e-1), + 111, + ), # GH43764 + ( + Timedelta(days=1, seconds=1e-9, milliseconds=1e-5, microseconds=1e-1), + 24 * 3600e9 + 111, + ), # GH43764 + (offsets.Nano(125), 125), + ], +) +def test_delta_to_nanoseconds(obj, expected): + result = 
delta_to_nanoseconds(obj) + assert result == expected + + +def test_delta_to_nanoseconds_error(): + obj = np.array([123456789], dtype="m8[ns]") + + with pytest.raises(TypeError, match=""): + delta_to_nanoseconds(obj) + + with pytest.raises(TypeError, match="float"): + delta_to_nanoseconds(1.5) + with pytest.raises(TypeError, match="int"): + delta_to_nanoseconds(1) + with pytest.raises(TypeError, match="int"): + delta_to_nanoseconds(np.int64(2)) + with pytest.raises(TypeError, match="int"): + delta_to_nanoseconds(np.int32(3)) + + +def test_delta_to_nanoseconds_td64_MY_raises(): + msg = ( + "delta_to_nanoseconds does not support Y or M units, " + "as their duration in nanoseconds is ambiguous" + ) + + td = np.timedelta64(1234, "Y") + + with pytest.raises(ValueError, match=msg): + delta_to_nanoseconds(td) + + td = np.timedelta64(1234, "M") + + with pytest.raises(ValueError, match=msg): + delta_to_nanoseconds(td) + + +@pytest.mark.parametrize("unit", ["Y", "M"]) +def test_unsupported_td64_unit_raises(unit): + # GH 52806 + with pytest.raises( + ValueError, + match=f"Unit {unit} is not supported. " + "Only unambiguous timedelta values durations are supported. 
" + "Allowed units are 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'", + ): + Timedelta(np.timedelta64(1, unit)) + + +def test_huge_nanoseconds_overflow(): + # GH 32402 + assert delta_to_nanoseconds(Timedelta(1e10)) == 1e10 + assert delta_to_nanoseconds(Timedelta(nanoseconds=1e10)) == 1e10 + + +@pytest.mark.parametrize( + "kwargs", [{"Seconds": 1}, {"seconds": 1, "Nanoseconds": 1}, {"Foo": 2}] +) +def test_kwarg_assertion(kwargs): + err_message = ( + "cannot construct a Timedelta from the passed arguments, " + "allowed keywords are " + "[weeks, days, hours, minutes, seconds, " + "milliseconds, microseconds, nanoseconds]" + ) + + with pytest.raises(ValueError, match=re.escape(err_message)): + Timedelta(**kwargs) + + +class TestArrayToTimedelta64: + def test_array_to_timedelta64_string_with_unit_2d_raises(self): + # check the 'unit is not None and errors != "coerce"' path + # in array_to_timedelta64 raises correctly with 2D values + values = np.array([["1", 2], [3, "4"]], dtype=object) + with pytest.raises(ValueError, match="unit must not be specified"): + array_to_timedelta64(values, unit="s") + + def test_array_to_timedelta64_non_object_raises(self): + # check we raise, not segfault + values = np.arange(5) + + msg = "'values' must have object dtype" + with pytest.raises(TypeError, match=msg): + array_to_timedelta64(values) + + +@pytest.mark.parametrize("unit", ["s", "ms", "us"]) +def test_ints_to_pytimedelta(unit): + # tests for non-nanosecond cases + arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]") + + res = ints_to_pytimedelta(arr, box=False) + # For non-nanosecond, .astype(object) gives pytimedelta objects + # instead of integers + expected = arr.astype(object) + tm.assert_numpy_array_equal(res, expected) + + res = ints_to_pytimedelta(arr, box=True) + expected = np.array([Timedelta(x) for x in arr], dtype=object) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize("unit", ["Y", "M", "ps", "fs", "as"]) +def 
test_ints_to_pytimedelta_unsupported(unit): + arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]") + + with pytest.raises(NotImplementedError, match=r"\d{1,2}"): + ints_to_pytimedelta(arr, box=False) + msg = "Only resolutions 's', 'ms', 'us', 'ns' are supported" + with pytest.raises(NotImplementedError, match=msg): + ints_to_pytimedelta(arr, box=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timezones.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timezones.py new file mode 100644 index 0000000000000000000000000000000000000000..28e4889983fb964167dd74623c8e4c4585c99a96 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_timezones.py @@ -0,0 +1,168 @@ +from datetime import ( + datetime, + timedelta, + timezone, +) + +import dateutil.tz +import pytest +import pytz + +from pandas._libs.tslibs import ( + conversion, + timezones, +) +from pandas.compat import is_platform_windows + +from pandas import Timestamp + + +def test_is_utc(utc_fixture): + tz = timezones.maybe_get_tz(utc_fixture) + assert timezones.is_utc(tz) + + +@pytest.mark.parametrize("tz_name", list(pytz.common_timezones)) +def test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name): + tz_p = timezones.maybe_get_tz(tz_name) + tz_d = timezones.maybe_get_tz("dateutil/" + tz_name) + + if tz_d is None: + pytest.skip(tz_name + ": dateutil does not know about this one") + + if not (tz_name == "UTC" and is_platform_windows()): + # they both end up as tzwin("UTC") on windows + assert timezones._p_tz_cache_key(tz_p) != timezones._p_tz_cache_key(tz_d) + + +def test_tzlocal_repr(): + # see gh-13583 + ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) + assert ts.tz == dateutil.tz.tzlocal() + assert "tz='tzlocal()')" in repr(ts) + + +def test_tzlocal_maybe_get_tz(): + # see gh-13583 + tz = timezones.maybe_get_tz("tzlocal()") + assert tz == dateutil.tz.tzlocal() + + +def test_tzlocal_offset(): + # see gh-13583 + # + # 
Get offset using normal datetime for test. + ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) + + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) + offset = offset.total_seconds() + + assert ts._value + offset == Timestamp("2011-01-01")._value + + +def test_tzlocal_is_not_utc(): + # even if the machine running the test is localized to UTC + tz = dateutil.tz.tzlocal() + assert not timezones.is_utc(tz) + + assert not timezones.tz_compare(tz, dateutil.tz.tzutc()) + + +def test_tz_compare_utc(utc_fixture, utc_fixture2): + tz = timezones.maybe_get_tz(utc_fixture) + tz2 = timezones.maybe_get_tz(utc_fixture2) + assert timezones.tz_compare(tz, tz2) + + +@pytest.fixture( + params=[ + (pytz.timezone("US/Eastern"), lambda tz, x: tz.localize(x)), + (dateutil.tz.gettz("US/Eastern"), lambda tz, x: x.replace(tzinfo=tz)), + ] +) +def infer_setup(request): + eastern, localize = request.param + + start_naive = datetime(2001, 1, 1) + end_naive = datetime(2009, 1, 1) + + start = localize(eastern, start_naive) + end = localize(eastern, end_naive) + + return eastern, localize, start, end, start_naive, end_naive + + +def test_infer_tz_compat(infer_setup): + eastern, _, start, end, start_naive, end_naive = infer_setup + + assert ( + timezones.infer_tzinfo(start, end) + is conversion.localize_pydatetime(start_naive, eastern).tzinfo + ) + assert ( + timezones.infer_tzinfo(start, None) + is conversion.localize_pydatetime(start_naive, eastern).tzinfo + ) + assert ( + timezones.infer_tzinfo(None, end) + is conversion.localize_pydatetime(end_naive, eastern).tzinfo + ) + + +def test_infer_tz_utc_localize(infer_setup): + _, _, start, end, start_naive, end_naive = infer_setup + utc = pytz.utc + + start = utc.localize(start_naive) + end = utc.localize(end_naive) + + assert timezones.infer_tzinfo(start, end) is utc + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_infer_tz_mismatch(infer_setup, ordered): + eastern, _, _, _, start_naive, end_naive = infer_setup + 
msg = "Inputs must both have the same timezone" + + utc = pytz.utc + start = utc.localize(start_naive) + end = conversion.localize_pydatetime(end_naive, eastern) + + args = (start, end) if ordered else (end, start) + + with pytest.raises(AssertionError, match=msg): + timezones.infer_tzinfo(*args) + + +def test_maybe_get_tz_invalid_types(): + with pytest.raises(TypeError, match=""): + timezones.maybe_get_tz(44.0) + + with pytest.raises(TypeError, match=""): + timezones.maybe_get_tz(pytz) + + msg = "" + with pytest.raises(TypeError, match=msg): + timezones.maybe_get_tz(Timestamp("2021-01-01", tz="UTC")) + + +def test_maybe_get_tz_offset_only(): + # see gh-36004 + + # timezone.utc + tz = timezones.maybe_get_tz(timezone.utc) + assert tz == timezone(timedelta(hours=0, minutes=0)) + + # without UTC+- prefix + tz = timezones.maybe_get_tz("+01:15") + assert tz == timezone(timedelta(hours=1, minutes=15)) + + tz = timezones.maybe_get_tz("-01:15") + assert tz == timezone(-timedelta(hours=1, minutes=15)) + + # with UTC+- prefix + tz = timezones.maybe_get_tz("UTC+02:45") + assert tz == timezone(timedelta(hours=2, minutes=45)) + + tz = timezones.maybe_get_tz("UTC-02:45") + assert tz == timezone(-timedelta(hours=2, minutes=45)) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_to_offset.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_to_offset.py new file mode 100644 index 0000000000000000000000000000000000000000..8ca55648f3780e6f31621f7b5cfdcd1435b1a231 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tslibs/test_to_offset.py @@ -0,0 +1,219 @@ +import re + +import pytest + +from pandas._libs.tslibs import ( + Timedelta, + offsets, + to_offset, +) + + +@pytest.mark.parametrize( + "freq_input,expected", + [ + (to_offset("10us"), offsets.Micro(10)), + (offsets.Hour(), offsets.Hour()), + ("2h30min", offsets.Minute(150)), + ("2h 30min", offsets.Minute(150)), + ("2h30min15s", offsets.Second(150 * 60 + 15)), + 
("2h 60min", offsets.Hour(3)), + ("2h 20.5min", offsets.Second(8430)), + ("1.5min", offsets.Second(90)), + ("0.5s", offsets.Milli(500)), + ("15ms500us", offsets.Micro(15500)), + ("10s75ms", offsets.Milli(10075)), + ("1s0.25ms", offsets.Micro(1000250)), + ("1s0.25ms", offsets.Micro(1000250)), + ("2800ns", offsets.Nano(2800)), + ("2SME", offsets.SemiMonthEnd(2)), + ("2SME-16", offsets.SemiMonthEnd(2, day_of_month=16)), + ("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)), + ("2SMS-15", offsets.SemiMonthBegin(2)), + ], +) +def test_to_offset(freq_input, expected): + result = to_offset(freq_input) + assert result == expected + + +@pytest.mark.parametrize( + "freqstr,expected", [("-1s", -1), ("-2SME", -2), ("-1SMS", -1), ("-5min10s", -310)] +) +def test_to_offset_negative(freqstr, expected): + result = to_offset(freqstr) + assert result.n == expected + + +@pytest.mark.filterwarnings("ignore:.*'m' is deprecated.*:FutureWarning") +@pytest.mark.parametrize( + "freqstr", + [ + "2h20m", + "us1", + "-us", + "3us1", + "-2-3us", + "-2D:3h", + "1.5.0s", + "2SMS-15-15", + "2SMS-15D", + "100foo", + # Invalid leading +/- signs. + "+-1d", + "-+1h", + "+1", + "-7", + "+d", + "-m", + # Invalid shortcut anchors. + "SME-0", + "SME-28", + "SME-29", + "SME-FOO", + "BSM", + "SME--1", + "SMS-1", + "SMS-28", + "SMS-30", + "SMS-BAR", + "SMS-BYR", + "BSMS", + "SMS--2", + ], +) +def test_to_offset_invalid(freqstr): + # see gh-13930 + + # We escape string because some of our + # inputs contain regex special characters. 
@pytest.mark.filterwarnings("ignore:.*'m' is deprecated.*:FutureWarning")
@pytest.mark.parametrize(
    "freqstr",
    [
        "2h20m",
        "us1",
        "-us",
        "3us1",
        "-2-3us",
        "-2D:3h",
        "1.5.0s",
        "2SMS-15-15",
        "2SMS-15D",
        "100foo",
        # Invalid leading +/- signs.
        "+-1d",
        "-+1h",
        "+1",
        "-7",
        "+d",
        "-m",
        # Invalid shortcut anchors.
        "SME-0",
        "SME-28",
        "SME-29",
        "SME-FOO",
        "BSM",
        "SME--1",
        "SMS-1",
        "SMS-28",
        "SMS-30",
        "SMS-BAR",
        "SMS-BYR",
        "BSMS",
        "SMS--2",
    ],
)
def test_to_offset_invalid(freqstr):
    """Malformed frequency strings raise ValueError naming the bad input."""
    # see gh-13930

    # Escape the input because some cases contain regex special characters.
    expected = re.escape(f"Invalid frequency: {freqstr}")
    with pytest.raises(ValueError, match=expected):
        to_offset(freqstr)


def test_to_offset_no_evaluate():
    """A non-(str/offset/timedelta) input such as a tuple is a TypeError."""
    with pytest.raises(TypeError, match=str(("", ""))):
        to_offset(("", ""))


def test_to_offset_tuple_unsupported():
    """Tuples are rejected with a hint to pass a string instead."""
    with pytest.raises(TypeError, match="pass as a string instead"):
        to_offset((5, "T"))


@pytest.mark.parametrize(
    "freqstr,expected",
    [
        ("2D 3h", offsets.Hour(51)),
        ("2 D3 h", offsets.Hour(51)),
        ("2 D 3 h", offsets.Hour(51)),
        (" 2 D 3 h ", offsets.Hour(51)),
        (" h ", offsets.Hour()),
        (" 3 h ", offsets.Hour(3)),
    ],
)
def test_to_offset_whitespace(freqstr, expected):
    """Arbitrary interior/leading/trailing whitespace is ignored."""
    assert to_offset(freqstr) == expected


@pytest.mark.parametrize(
    "freqstr,expected", [("00h 00min 01s", 1), ("-00h 03min 14s", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
    """Zero-padded components parse; a leading minus negates the total."""
    assert to_offset(freqstr).n == expected


@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
    """An explicit leading plus sign is accepted."""
    assert to_offset(freqstr).n == expected


@pytest.mark.parametrize(
    "kwargs,expected",
    [
        ({"days": 1, "seconds": 1}, offsets.Second(86401)),
        ({"days": -1, "seconds": 1}, offsets.Second(-86399)),
        ({"hours": 1, "minutes": 10}, offsets.Minute(70)),
        ({"hours": 1, "minutes": -10}, offsets.Minute(50)),
        ({"weeks": 1}, offsets.Day(7)),
        ({"hours": 1}, offsets.Hour(1)),
        ({"hours": 1}, to_offset("60min")),
        ({"microseconds": 1}, offsets.Micro(1)),
        ({"microseconds": 0}, offsets.Nano(0)),
    ],
)
def test_to_offset_pd_timedelta(kwargs, expected):
    """Timedelta inputs collapse to the coarsest exact tick offset."""
    # see gh-9064
    assert to_offset(Timedelta(**kwargs)) == expected
offsets.QuarterEnd(startingMonth=12)), + ("QE-MAY", offsets.QuarterEnd(startingMonth=5)), + ("SME", offsets.SemiMonthEnd(day_of_month=15)), + ("SME-15", offsets.SemiMonthEnd(day_of_month=15)), + ("SME-1", offsets.SemiMonthEnd(day_of_month=1)), + ("SME-27", offsets.SemiMonthEnd(day_of_month=27)), + ("SMS-2", offsets.SemiMonthBegin(day_of_month=2)), + ("SMS-27", offsets.SemiMonthBegin(day_of_month=27)), + ], +) +def test_anchored_shortcuts(shortcut, expected): + result = to_offset(shortcut) + assert result == expected + + +@pytest.mark.parametrize( + "freq_depr", + [ + "2ye-mar", + "2ys", + "2qe", + "2qs-feb", + "2bqs", + "2sms", + "2bms", + "2cbme", + "2me", + "2w", + ], +) +def test_to_offset_lowercase_frequency_deprecated(freq_depr): + # GH#54939 + depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a " + f"future version, please use '{freq_depr.upper()[1:]}' instead." + + with pytest.raises(FutureWarning, match=depr_msg): + to_offset(freq_depr) + + +@pytest.mark.parametrize( + "freq_depr", + [ + "2H", + "2BH", + "2MIN", + "2S", + "2Us", + "2NS", + ], +) +def test_to_offset_uppercase_frequency_deprecated(freq_depr): + # GH#54939 + depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a " + f"future version, please use '{freq_depr.lower()[1:]}' instead." 
class TestTZLocalizeToUTC:
    def test_tz_localize_to_utc_ambiguous_infer(self):
        """ambiguous='infer' fails loudly when repeats are absent or inconsistent."""
        # This epoch-ns stamp falls in the repeated hour of the
        # 2011-11-06 US/Eastern DST fallback, so it is ambiguous.
        val = 1_320_541_200_000_000_000
        stamps = np.array([val, val - 1, val], dtype=np.int64)

        with pytest.raises(pytz.AmbiguousTimeError, match="2011-11-06 01:00:00"):
            tz_localize_to_utc(stamps, pytz.timezone("US/Eastern"), ambiguous="infer")

        # A single ambiguous stamp gives "infer" no repeats to work from.
        with pytest.raises(pytz.AmbiguousTimeError, match="are no repeated times"):
            tz_localize_to_utc(
                stamps[:1], pytz.timezone("US/Eastern"), ambiguous="infer"
            )

        # Breaking monotonicity makes inference see a second DST switch.
        stamps[1] += 1
        with pytest.raises(
            pytz.AmbiguousTimeError,
            match="There are 2 dst switches when there should only be 1",
        ):
            tz_localize_to_utc(stamps, pytz.timezone("US/Eastern"), ambiguous="infer")
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/window/__pycache__/test_pairwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11199b176e3cadb4325dd74e0a196623041d43eb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/window/__pycache__/test_pairwise.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/window/__pycache__/test_rolling_skew_kurt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/window/__pycache__/test_rolling_skew_kurt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecd4f01016b2d0965be23bf539b03bec090d047d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/window/__pycache__/test_rolling_skew_kurt.cpython-310.pyc differ