diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_api.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..60bcb97aaa3642be064bcacd130edf2084c4a55c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_api.py
@@ -0,0 +1,383 @@
+from __future__ import annotations
+
+import pytest
+
+import pandas as pd
+from pandas import api
+import pandas._testing as tm
+from pandas.api import (
+    extensions as api_extensions,
+    indexers as api_indexers,
+    interchange as api_interchange,
+    types as api_types,
+    typing as api_typing,
+)
+
+
+class Base:
+    def check(self, namespace, expected, ignored=None):
+        # see which names are in the namespace, minus optional
+        # ignored ones
+        # compare vs the expected
+
+        result = sorted(
+            f for f in dir(namespace) if not f.startswith("__") and f != "annotations"
+        )
+        if ignored is not None:
+            result = sorted(set(result) - set(ignored))
+
+        expected = sorted(expected)
+        tm.assert_almost_equal(result, expected)
+
+
+class TestPDApi(Base):
+    # these are optionally imported based on testing
+    # & need to be ignored
+    ignored = ["tests", "locale", "conftest", "_version_meson"]
+
+    # top-level sub-packages
+    public_lib = [
+        "api",
+        "arrays",
+        "options",
+        "test",
+        "testing",
+        "errors",
+        "plotting",
+        "io",
+        "tseries",
+    ]
+    private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]
+
+    # misc
+    misc = ["IndexSlice", "NaT", "NA"]
+
+    # top-level classes
+    classes = [
+        "ArrowDtype",
+        "Categorical",
+        "CategoricalIndex",
+        "DataFrame",
+        "DateOffset",
+        "DatetimeIndex",
+        "ExcelFile",
+        "ExcelWriter",
+        "Flags",
+        "Grouper",
+        "HDFStore",
+        "Index",
+        "MultiIndex",
+        "Period",
+        "PeriodIndex",
+        "RangeIndex",
+        "Series",
+        "SparseDtype",
+        "StringDtype",
+        "Timedelta",
+        "TimedeltaIndex",
+        "Timestamp",
+        "Interval",
+        "IntervalIndex",
+        "CategoricalDtype",
+        "PeriodDtype",
+        "IntervalDtype",
+        "DatetimeTZDtype",
+        "BooleanDtype",
+        "Int8Dtype",
+        "Int16Dtype",
+        "Int32Dtype",
+        "Int64Dtype",
+        "UInt8Dtype",
+        "UInt16Dtype",
+        "UInt32Dtype",
+        "UInt64Dtype",
+        "Float32Dtype",
+        "Float64Dtype",
+        "NamedAgg",
+    ]
+
+    # these are already deprecated; awaiting removal
+    deprecated_classes: list[str] = []
+
+    # external modules exposed in pandas namespace
+    modules: list[str] = []
+
+    # top-level functions
+    funcs = [
+        "array",
+        "bdate_range",
+        "concat",
+        "crosstab",
+        "cut",
+        "date_range",
+        "interval_range",
+        "eval",
+        "factorize",
+        "get_dummies",
+        "from_dummies",
+        "infer_freq",
+        "isna",
+        "isnull",
+        "lreshape",
+        "melt",
+        "notna",
+        "notnull",
+        "offsets",
+        "merge",
+        "merge_ordered",
+        "merge_asof",
+        "period_range",
+        "pivot",
+        "pivot_table",
+        "qcut",
+        "show_versions",
+        "timedelta_range",
+        "unique",
+        "value_counts",
+        "wide_to_long",
+    ]
+
+    # top-level option funcs
+    funcs_option = [
+        "reset_option",
+        "describe_option",
+        "get_option",
+        "option_context",
+        "set_option",
+        "set_eng_float_format",
+    ]
+
+    # top-level read_* funcs
+    funcs_read = [
+        "read_clipboard",
+        "read_csv",
+        "read_excel",
+        "read_fwf",
+        "read_gbq",
+        "read_hdf",
+        "read_html",
+        "read_xml",
+        "read_json",
+        "read_pickle",
+        "read_sas",
+        "read_sql",
+        "read_sql_query",
+        "read_sql_table",
+        "read_stata",
+        "read_table",
+        "read_feather",
+        "read_parquet",
+        "read_orc",
+        "read_spss",
+    ]
+
+    # top-level json funcs
+    funcs_json = ["json_normalize"]
+
+    # top-level to_* funcs
+    funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]
+
+    # top-level to deprecate in the future
+    deprecated_funcs_in_future: list[str] = []
+
+    # these are already deprecated; awaiting removal
+    deprecated_funcs: list[str] = []
+
+    # private modules in pandas namespace
+    private_modules = [
+        "_config",
+        "_libs",
+        "_is_numpy_dev",
+        "_pandas_datetime_CAPI",
+        "_pandas_parser_CAPI",
+        "_testing",
+        "_typing",
+    ]
+    if not pd._built_with_meson:
+        private_modules.append("_version")
+
+    def test_api(self):
+        checkthese = (
+            self.public_lib
+            + self.private_lib
+            + self.misc
+            + self.modules
+            + self.classes
+            + self.funcs
+            + self.funcs_option
+            + self.funcs_read
+            + self.funcs_json
+            + self.funcs_to
+            + self.private_modules
+        )
+        self.check(namespace=pd, expected=checkthese, ignored=self.ignored)
+
+    def test_api_all(self):
+        expected = set(
+            self.public_lib
+            + self.misc
+            + self.modules
+            + self.classes
+            + self.funcs
+            + self.funcs_option
+            + self.funcs_read
+            + self.funcs_json
+            + self.funcs_to
+        ) - set(self.deprecated_classes)
+        actual = set(pd.__all__)
+
+        extraneous = actual - expected
+        assert not extraneous
+
+        missing = expected - actual
+        assert not missing
+
+    def test_depr(self):
+        deprecated_list = (
+            self.deprecated_classes
+            + self.deprecated_funcs
+            + self.deprecated_funcs_in_future
+        )
+        for depr in deprecated_list:
+            with tm.assert_produces_warning(FutureWarning):
+                _ = getattr(pd, depr)
+
+
+class TestApi(Base):
+    allowed_api_dirs = [
+        "types",
+        "extensions",
+        "indexers",
+        "interchange",
+        "typing",
+    ]
+    allowed_typing = [
+        "DataFrameGroupBy",
+        "DatetimeIndexResamplerGroupby",
+        "Expanding",
+        "ExpandingGroupby",
+        "ExponentialMovingWindow",
+        "ExponentialMovingWindowGroupby",
+        "JsonReader",
+        "NaTType",
+        "NAType",
+        "PeriodIndexResamplerGroupby",
+        "Resampler",
+        "Rolling",
+        "RollingGroupby",
+        "SeriesGroupBy",
+        "StataReader",
+        "TimedeltaIndexResamplerGroupby",
+        "TimeGrouper",
+        "Window",
+    ]
+    allowed_api_types = [
+        "is_any_real_numeric_dtype",
+        "is_array_like",
+        "is_bool",
+        "is_bool_dtype",
+        "is_categorical_dtype",
+        "is_complex",
+        "is_complex_dtype",
+        "is_datetime64_any_dtype",
+        "is_datetime64_dtype",
+        "is_datetime64_ns_dtype",
+        "is_datetime64tz_dtype",
+        "is_dict_like",
+        "is_dtype_equal",
+        "is_extension_array_dtype",
+        "is_file_like",
+        "is_float",
+        "is_float_dtype",
+        "is_hashable",
+        "is_int64_dtype",
+        "is_integer",
+        "is_integer_dtype",
+        "is_interval",
+        "is_interval_dtype",
+        "is_iterator",
+        "is_list_like",
+        "is_named_tuple",
+        "is_number",
+        "is_numeric_dtype",
+        "is_object_dtype",
+        "is_period_dtype",
+        "is_re",
+        "is_re_compilable",
+        "is_scalar",
+        "is_signed_integer_dtype",
+        "is_sparse",
+        "is_string_dtype",
+        "is_timedelta64_dtype",
+        "is_timedelta64_ns_dtype",
+        "is_unsigned_integer_dtype",
+        "pandas_dtype",
+        "infer_dtype",
+        "union_categoricals",
+        "CategoricalDtype",
+        "DatetimeTZDtype",
+        "IntervalDtype",
+        "PeriodDtype",
+    ]
+    allowed_api_interchange = ["from_dataframe", "DataFrame"]
+    allowed_api_indexers = [
+        "check_array_indexer",
+        "BaseIndexer",
+        "FixedForwardWindowIndexer",
+        "VariableOffsetWindowIndexer",
+    ]
+    allowed_api_extensions = [
+        "no_default",
+        "ExtensionDtype",
+        "register_extension_dtype",
+        "register_dataframe_accessor",
+        "register_index_accessor",
+        "register_series_accessor",
+        "take",
+        "ExtensionArray",
+        "ExtensionScalarOpsMixin",
+    ]
+
+    def test_api(self):
+        self.check(api, self.allowed_api_dirs)
+
+    def test_api_typing(self):
+        self.check(api_typing, self.allowed_typing)
+
+    def test_api_types(self):
+        self.check(api_types, self.allowed_api_types)
+
+    def test_api_interchange(self):
+        self.check(api_interchange, self.allowed_api_interchange)
+
+    def test_api_indexers(self):
+        self.check(api_indexers, self.allowed_api_indexers)
+
+    def test_api_extensions(self):
+        self.check(api_extensions, self.allowed_api_extensions)
+
+
+class TestTesting(Base):
+    funcs = [
+        "assert_frame_equal",
+        "assert_series_equal",
+        "assert_index_equal",
+        "assert_extension_array_equal",
+    ]
+
+    def test_testing(self):
+        from pandas import testing
+
+        self.check(testing, self.funcs)
+
+    def test_util_in_top_level(self):
+        with pytest.raises(AttributeError, match="foo"):
+            pd.util.foo
+
+
+def test_pandas_array_alias():
+    msg = "PandasArray has been renamed NumpyExtensionArray"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        res = pd.arrays.PandasArray
+
+    assert res is pd.arrays.NumpyExtensionArray
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_types.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbaa6e7e18bcaa9a574b741b5361818f1be01ecf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/api/test_types.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import pandas._testing as tm
+from pandas.api import types
+from pandas.tests.api.test_api import Base
+
+
+class TestTypes(Base):
+    allowed = [
+        "is_any_real_numeric_dtype",
+        "is_bool",
+        "is_bool_dtype",
+        "is_categorical_dtype",
+        "is_complex",
+        "is_complex_dtype",
+        "is_datetime64_any_dtype",
+        "is_datetime64_dtype",
+        "is_datetime64_ns_dtype",
+        "is_datetime64tz_dtype",
+        "is_dtype_equal",
+        "is_float",
+        "is_float_dtype",
+        "is_int64_dtype",
+        "is_integer",
+        "is_integer_dtype",
+        "is_number",
+        "is_numeric_dtype",
+        "is_object_dtype",
+        "is_scalar",
+        "is_sparse",
+        "is_string_dtype",
+        "is_signed_integer_dtype",
+        "is_timedelta64_dtype",
+        "is_timedelta64_ns_dtype",
+        "is_unsigned_integer_dtype",
+        "is_period_dtype",
+        "is_interval",
+        "is_interval_dtype",
+        "is_re",
+        "is_re_compilable",
+        "is_dict_like",
+        "is_iterator",
+        "is_file_like",
+        "is_list_like",
+        "is_hashable",
+        "is_array_like",
+        "is_named_tuple",
+        "pandas_dtype",
+        "union_categoricals",
+        "infer_dtype",
+        "is_extension_array_dtype",
+    ]
+    deprecated: list[str] = []
+    dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"]
+
+    def test_types(self):
+        self.check(types, self.allowed + self.dtypes + self.deprecated)
+
+    def test_deprecated_from_api_types(self):
+        for t in self.deprecated:
+            with tm.assert_produces_warning(FutureWarning):
+                getattr(types, t)(1)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f236cd3115f2d53a8446ad011640f8f884145fd8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a9939213c3693387d0e89aa63e4415f98c7fad2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19abf40040796f928491c1e27fb9deb1bf3d7fcf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ecca380ed94bdd94a74aa2211bc71629764e29a2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6086c6dd9bd6d25a9c616b75832f6db1c13772fe
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da5e2b8e2d442e6505f64d2a6b2de115c277ac73
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7568d60eaaf846e73e9a80a87ccf44a03d604b5a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9cd059767de6da0180d34988410de76e1515bef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f774bbb6baad1c02ed1527ccc270961cd499129
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1ba5768d0325630b7a613e6561e8dba2e461bef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_str.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/common.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4d153df54059ca2a82f336e19afb4297eb218a2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/common.py
@@ -0,0 +1,7 @@
+from pandas.core.groupby.base import transformation_kernels
+
+# There is no Series.cumcount or DataFrame.cumcount
+series_transform_kernels = [
+    x for x in sorted(transformation_kernels) if x != "cumcount"
+]
+frame_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7eac6b8f0ea11cfdaaf760101eb407901c90319
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply.py
@@ -0,0 +1,1733 @@
+from datetime import datetime
+import warnings
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    Series,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+from pandas.tests.frame.common import zip_frames
+
+
+@pytest.fixture
+def int_frame_const_col():
+    """
+    Fixture for DataFrame of ints which are constant per column
+
+    Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]
+    """
+    df = DataFrame(
+        np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1,
+        columns=["A", "B", "C"],
+    )
+    return df
+
+
+@pytest.fixture(params=["python", pytest.param("numba", marks=pytest.mark.single_cpu)])
+def engine(request):
+    if request.param == "numba":
+        pytest.importorskip("numba")
+    return request.param
+
+
+def test_apply(float_frame, engine, request):
+    if engine == "numba":
+        mark = pytest.mark.xfail(reason="numba engine not supporting numpy ufunc yet")
+        request.node.add_marker(mark)
+    with np.errstate(all="ignore"):
+        # ufunc
+        result = np.sqrt(float_frame["A"])
+        expected = float_frame.apply(np.sqrt, engine=engine)["A"]
+        tm.assert_series_equal(result, expected)
+
+        # aggregator
+        result = float_frame.apply(np.mean, engine=engine)["A"]
+        expected = np.mean(float_frame["A"])
+        assert result == expected
+
+        d = float_frame.index[0]
+        result = float_frame.apply(np.mean, axis=1, engine=engine)
+        expected = np.mean(float_frame.xs(d))
+        assert result[d] == expected
+        assert result.index is float_frame.index
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize("raw", [True, False])
+def test_apply_args(float_frame, axis, raw, engine, request):
+    if engine == "numba":
+        mark = pytest.mark.xfail(reason="numba engine doesn't support args")
+        request.node.add_marker(mark)
+    result = float_frame.apply(
+        lambda x, y: x + y, axis, args=(1,), raw=raw, engine=engine
+    )
+    expected = float_frame + 1
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_categorical_func():
+    # GH 9573
+    df = DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]})
+    result = df.apply(lambda ts: ts.astype("category"))
+
+    assert result.shape == (4, 2)
+    assert isinstance(result["c0"].dtype, CategoricalDtype)
+    assert isinstance(result["c1"].dtype, CategoricalDtype)
+
+
+def test_apply_axis1_with_ea():
+    # GH#36785
+    expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]})
+    result = expected.apply(lambda x: x, axis=1)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data, dtype",
+    [(1, None), (1, CategoricalDtype([1])), (Timestamp("2013-01-01", tz="UTC"), None)],
+)
+def test_agg_axis1_duplicate_index(data, dtype):
+    # GH 42380
+    expected = DataFrame([[data], [data]], index=["a", "a"], dtype=dtype)
+    result = expected.agg(lambda x: x, axis=1)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_mixed_datetimelike():
+    # mixed datetimelike
+    # GH 7778
+    expected = DataFrame(
+        {
+            "A": date_range("20130101", periods=3),
+            "B": pd.to_timedelta(np.arange(3), unit="s"),
+        }
+    )
+    result = expected.apply(lambda x: x, axis=1)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", [np.sqrt, np.mean])
+def test_apply_empty(func, engine):
+    # empty
+    empty_frame = DataFrame()
+
+    result = empty_frame.apply(func, engine=engine)
+    assert result.empty
+
+
+def test_apply_float_frame(float_frame, engine):
+    no_rows = float_frame[:0]
+    result = no_rows.apply(lambda x: x.mean(), engine=engine)
+    expected = Series(np.nan, index=float_frame.columns)
+    tm.assert_series_equal(result, expected)
+
+    no_cols = float_frame.loc[:, []]
+    result = no_cols.apply(lambda x: x.mean(), axis=1, engine=engine)
+    expected = Series(np.nan, index=float_frame.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_empty_except_index(engine):
+    # GH 2476
+    expected = DataFrame(index=["a"])
+    result = expected.apply(lambda x: x["a"], axis=1, engine=engine)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_with_reduce_empty():
+    # reduce with an empty DataFrame
+    empty_frame = DataFrame()
+
+    x = []
+    result = empty_frame.apply(x.append, axis=1, result_type="expand")
+    tm.assert_frame_equal(result, empty_frame)
+    result = empty_frame.apply(x.append, axis=1, result_type="reduce")
+    expected = Series([], dtype=np.float64)
+    tm.assert_series_equal(result, expected)
+
+    empty_with_cols = DataFrame(columns=["a", "b", "c"])
+    result = empty_with_cols.apply(x.append, axis=1, result_type="expand")
+    tm.assert_frame_equal(result, empty_with_cols)
+    result = empty_with_cols.apply(x.append, axis=1, result_type="reduce")
+    expected = Series([], dtype=np.float64)
+    tm.assert_series_equal(result, expected)
+
+    # Ensure that x.append hasn't been called
+    assert x == []
+
+
+@pytest.mark.parametrize("func", ["sum", "prod", "any", "all"])
+def test_apply_funcs_over_empty(func):
+    # GH 28213
+    df = DataFrame(columns=["a", "b", "c"])
+
+    result = df.apply(getattr(np, func))
+    expected = getattr(df, func)()
+    if func in ("sum", "prod"):
+        expected = expected.astype(float)
+    tm.assert_series_equal(result, expected)
+
+
+def test_nunique_empty():
+    # GH 28213
+    df = DataFrame(columns=["a", "b", "c"])
+
+    result = df.nunique()
+    expected = Series(0, index=df.columns)
+    tm.assert_series_equal(result, expected)
+
+    result = df.T.nunique()
+    expected = Series([], dtype=np.float64)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_standard_nonunique():
+    df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
+
+    result = df.apply(lambda s: s[0], axis=1)
+    expected = Series([1, 4, 7], ["a", "a", "c"])
+    tm.assert_series_equal(result, expected)
+
+    result = df.T.apply(lambda s: s[0], axis=0)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_broadcast_scalars(float_frame):
+    # scalars
+    result = float_frame.apply(np.mean, result_type="broadcast")
+    expected = DataFrame([float_frame.mean()], index=float_frame.index)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_broadcast_scalars_axis1(float_frame):
+    result = float_frame.apply(np.mean, axis=1, result_type="broadcast")
+    m = float_frame.mean(axis=1)
+    expected = DataFrame({c: m for c in float_frame.columns})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_broadcast_lists_columns(float_frame):
+    # lists
+    result = float_frame.apply(
+        lambda x: list(range(len(float_frame.columns))),
+        axis=1,
+        result_type="broadcast",
+    )
+    m = list(range(len(float_frame.columns)))
+    expected = DataFrame(
+        [m] * len(float_frame.index),
+        dtype="float64",
+        index=float_frame.index,
+        columns=float_frame.columns,
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_broadcast_lists_index(float_frame):
+    result = float_frame.apply(
+        lambda x: list(range(len(float_frame.index))), result_type="broadcast"
+    )
+    m = list(range(len(float_frame.index)))
+    expected = DataFrame(
+        {c: m for c in float_frame.columns},
+        dtype="float64",
+        index=float_frame.index,
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_broadcast_list_lambda_func(int_frame_const_col):
+    # preserve columns
+    df = int_frame_const_col
+    result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast")
+    tm.assert_frame_equal(result, df)
+
+
+def test_apply_broadcast_series_lambda_func(int_frame_const_col):
+    df = int_frame_const_col
+    result = df.apply(
+        lambda x: Series([1, 2, 3], index=list("abc")),
+        axis=1,
+        result_type="broadcast",
+    )
+    expected = df.copy()
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+def test_apply_raw_float_frame(float_frame, axis, engine):
+    if engine == "numba":
+        pytest.skip("numba can't handle when UDF returns None.")
+
+    def _assert_raw(x):
+        assert isinstance(x, np.ndarray)
+        assert x.ndim == 1
+
+    float_frame.apply(_assert_raw, axis=axis, engine=engine, raw=True)
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+def test_apply_raw_float_frame_lambda(float_frame, axis, engine):
+    result = float_frame.apply(np.mean, axis=axis, engine=engine, raw=True)
+    expected = float_frame.apply(lambda x: x.values.mean(), axis=axis)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_raw_float_frame_no_reduction(float_frame, engine):
+    # no reduction
+    result = float_frame.apply(lambda x: x * 2, engine=engine, raw=True)
+    expected = float_frame * 2
+    tm.assert_frame_equal(result, expected)
+
+
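+# Editor's note: the sketch below is not part of the upstream pandas test
+# file; it only illustrates the contract the raw tests above and below
+# exercise. With raw=True the UDF receives a bare 1-D np.ndarray, so
+# ndarray methods work but Series-only attributes (e.g. .name) do not:
+#
+#     df = DataFrame({"a": [1, 2, 3]})
+#     df.apply(lambda v: v.mean(), raw=True)    # v is a np.ndarray
+#     df.apply(lambda s: s.name, raw=False)     # s is a Series with .name
+
+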
+@pytest.mark.parametrize("axis", [0, 1])
+def test_apply_raw_mixed_type_frame(axis, engine):
+    if engine == "numba":
+        pytest.skip("isinstance check doesn't work with numba")
+
+    def _assert_raw(x):
+        assert isinstance(x, np.ndarray)
+        assert x.ndim == 1
+
+    # Mixed dtype (GH-32423)
+    df = DataFrame(
+        {
+            "a": 1.0,
+            "b": 2,
+            "c": "foo",
+            "float32": np.array([1.0] * 10, dtype="float32"),
+            "int32": np.array([1] * 10, dtype="int32"),
+        },
+        index=np.arange(10),
+    )
+    df.apply(_assert_raw, axis=axis, engine=engine, raw=True)
+
+
+def test_apply_axis1(float_frame):
+    d = float_frame.index[0]
+    result = float_frame.apply(np.mean, axis=1)[d]
+    expected = np.mean(float_frame.xs(d))
+    assert result == expected
+
+
+def test_apply_mixed_dtype_corner():
+    df = DataFrame({"A": ["foo"], "B": [1.0]})
+    result = df[:0].apply(np.mean, axis=1)
+    # the result here is actually kind of ambiguous, should it be a Series
+    # or a DataFrame?
+    expected = Series(np.nan, index=pd.Index([], dtype="int64"))
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_mixed_dtype_corner_indexing():
+    df = DataFrame({"A": ["foo"], "B": [1.0]})
+    result = df.apply(lambda x: x["A"], axis=1)
+    expected = Series(["foo"], index=[0])
+    tm.assert_series_equal(result, expected)
+
+    result = df.apply(lambda x: x["B"], axis=1)
+    expected = Series([1.0], index=[0])
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.filterwarnings("ignore::RuntimeWarning")
+@pytest.mark.parametrize("ax", ["index", "columns"])
+@pytest.mark.parametrize(
+    "func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"]
+)
+@pytest.mark.parametrize("raw", [True, False])
+@pytest.mark.parametrize("axis", [0, 1])
+def test_apply_empty_infer_type(ax, func, raw, axis, engine, request):
+    df = DataFrame(**{ax: ["a", "b", "c"]})
+
+    with np.errstate(all="ignore"):
+        test_res = func(np.array([], dtype="f8"))
+        is_reduction = not isinstance(test_res, np.ndarray)
+
+        result = df.apply(func, axis=axis, engine=engine, raw=raw)
+        if is_reduction:
+            agg_axis = df._get_agg_axis(axis)
+            assert isinstance(result, Series)
+            assert result.index is agg_axis
+        else:
+            assert isinstance(result, DataFrame)
+
+
+def test_apply_empty_infer_type_broadcast():
+    no_cols = DataFrame(index=["a", "b", "c"])
+    result = no_cols.apply(lambda x: x.mean(), result_type="broadcast")
+    assert isinstance(result, DataFrame)
+
+
+def test_apply_with_args_kwds_add_some(float_frame):
+    def add_some(x, howmuch=0):
+        return x + howmuch
+
+    result = float_frame.apply(add_some, howmuch=2)
+    expected = float_frame.apply(lambda x: x + 2)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_with_args_kwds_agg_and_add(float_frame):
+    def agg_and_add(x, howmuch=0):
+        return x.mean() + howmuch
+
+    result = float_frame.apply(agg_and_add, howmuch=2)
+    expected = float_frame.apply(lambda x: x.mean() + 2)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_with_args_kwds_subtract_and_divide(float_frame):
+    def subtract_and_divide(x, sub, divide=1):
+        return (x - sub) / divide
+
+    result = float_frame.apply(subtract_and_divide, args=(2,), divide=2)
+    expected = float_frame.apply(lambda x: (x - 2.0) / 2.0)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_yield_list(float_frame):
+    result = float_frame.apply(list)
+    tm.assert_frame_equal(result, float_frame)
+
+
+def test_apply_reduce_Series(float_frame):
+    float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan
+    expected = float_frame.mean(1)
+    result = float_frame.apply(np.mean, axis=1)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_reduce_to_dict():
+    # GH 25196 37544
+    data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"])
+
+    result = data.apply(dict, axis=0)
+    expected = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns)
+    tm.assert_series_equal(result, expected)
+
+    result = data.apply(dict, axis=1)
+    expected = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_differently_indexed():
+    df = DataFrame(np.random.default_rng(2).standard_normal((20, 10)))
+
+    result = df.apply(Series.describe, axis=0)
+    expected = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns)
+    tm.assert_frame_equal(result, expected)
+
+    result = df.apply(Series.describe, axis=1)
+    expected = DataFrame({i: v.describe() for i, v in df.T.items()}, columns=df.index).T
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_bug():
+    # GH 6125
+    positions = DataFrame(
+        [
+            [1, "ABC0", 50],
+            [1, "YUM0", 20],
+            [1, "DEF0", 20],
+            [2, "ABC1", 50],
+            [2, "YUM1", 20],
+            [2, "DEF1", 20],
+        ],
+        columns=["a", "market", "position"],
+    )
+
+    def f(r):
+        return r["market"]
+
+    expected = positions.apply(f, axis=1)
+
+    positions = DataFrame(
+        [
+            [datetime(2013, 1, 1), "ABC0", 50],
+            [datetime(2013, 1, 2), "YUM0", 20],
+            [datetime(2013, 1, 3), "DEF0", 20],
+            [datetime(2013, 1, 4), "ABC1", 50],
+            [datetime(2013, 1, 5), "YUM1", 20],
+            [datetime(2013, 1, 6), "DEF1", 20],
+        ],
+        columns=["a", "market", "position"],
+    )
+    result = positions.apply(f, axis=1)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_convert_objects():
+    expected = DataFrame(
+        {
+            "A": [
+                "foo",
+                "foo",
+                "foo",
+                "foo",
+                "bar",
+                "bar",
+                "bar",
+                "bar",
+                "foo",
+                "foo",
+                "foo",
+            ],
+            "B": [
+                "one",
+                "one",
+                "one",
+                "two",
+                "one",
+                "one",
+                "one",
+                "two",
+                "two",
+                "two",
+                "one",
+            ],
+            "C": [
+                "dull",
+                "dull",
+                "shiny",
+                "dull",
+                "dull",
+                "shiny",
+                "shiny",
+                "dull",
+                "shiny",
+                "shiny",
+                "shiny",
+            ],
+            "D": np.random.default_rng(2).standard_normal(11),
+            "E": np.random.default_rng(2).standard_normal(11),
+            "F": np.random.default_rng(2).standard_normal(11),
+        }
+    )
+
+    result = expected.apply(lambda x: x, axis=1)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_attach_name(float_frame):
+    result = float_frame.apply(lambda x: x.name)
+    expected = Series(float_frame.columns, index=float_frame.columns)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_attach_name_axis1(float_frame):
+    result = float_frame.apply(lambda x: x.name, axis=1)
+    expected = Series(float_frame.index, index=float_frame.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_attach_name_non_reduction(float_frame):
+    # non-reductions
+    result = float_frame.apply(lambda x: np.repeat(x.name, len(x)))
+    expected = DataFrame(
+        np.tile(float_frame.columns, (len(float_frame.index), 1)),
+        index=float_frame.index,
+        columns=float_frame.columns,
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_attach_name_non_reduction_axis1(float_frame):
+    result = float_frame.apply(lambda x: np.repeat(x.name, len(x)), axis=1)
+    expected = Series(
+        np.repeat(t[0], len(float_frame.columns)) for t in float_frame.itertuples()
+    )
+    expected.index = float_frame.index
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_multi_index():
+    index = MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "d"]])
+    s = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["col1", "col2"])
+    result = s.apply(lambda x: Series({"min": min(x), "max": max(x)}), 1)
+    expected = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["min", "max"])
+    tm.assert_frame_equal(result, expected, check_like=True)
+
+
+@pytest.mark.parametrize(
+    "df, dicts",
+    [
+        [
+            DataFrame([["foo", "bar"], ["spam", "eggs"]]),
+            Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}]),
+        ],
+        [DataFrame([[0, 1], [2, 3]]), Series([{0: 0, 1: 2}, {0: 1, 1: 3}])],
+    ],
+)
+def test_apply_dict(df, dicts):
+    # GH 8735
+    fn = lambda x: x.to_dict()
+    reduce_true = df.apply(fn, result_type="reduce")
+    reduce_false = df.apply(fn, result_type="expand")
+    reduce_none = df.apply(fn)
+
+    tm.assert_series_equal(reduce_true, dicts)
+    tm.assert_frame_equal(reduce_false, df)
+    tm.assert_series_equal(reduce_none, dicts)
+
+
+def test_apply_non_numpy_dtype():
+    # GH 12244
+    df = DataFrame({"dt": date_range("2015-01-01", periods=3, tz="Europe/Brussels")})
+    result = df.apply(lambda x: x)
+    tm.assert_frame_equal(result, df)
+
+    result = df.apply(lambda x: x + pd.Timedelta("1day"))
+    expected = DataFrame(
+        {"dt": date_range("2015-01-02", periods=3, tz="Europe/Brussels")}
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_non_numpy_dtype_category():
+    df = DataFrame({"dt": ["a", "b", "c", "a"]}, dtype="category")
+    result = df.apply(lambda x: x)
+    tm.assert_frame_equal(result, df)
+
+
+def test_apply_dup_names_multi_agg():
+    # GH 21063
+    df = DataFrame([[0, 1], [2, 3]], columns=["a", "a"])
+    expected = DataFrame([[0, 1]], columns=["a", "a"], index=["min"])
+    result = df.agg(["min"])
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("op", ["apply", "agg"])
+def test_apply_nested_result_axis_1(op):
+    # GH 13820
+    def apply_list(row):
+        return [2 * row["A"], 2 * row["C"], 2 * row["B"]]
+
+    df = DataFrame(np.zeros((4, 4)), columns=list("ABCD"))
+    result = getattr(df, op)(apply_list, axis=1)
+    expected = Series(
+        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
+    )
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_noreduction_tzaware_object():
+    # https://github.com/pandas-dev/pandas/issues/31505
+    expected = DataFrame(
+        {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]"
+    )
+    result = expected.apply(lambda x: x)
+    tm.assert_frame_equal(result, expected)
+    result = expected.apply(lambda x: x.copy())
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_function_runs_once():
+    # https://github.com/pandas-dev/pandas/issues/30815
+
+    df = DataFrame({"a": [1, 2, 3]})
+    names = []  # Save row names function is applied to
+
+    def reducing_function(row):
+        names.append(row.name)
+
+    def non_reducing_function(row):
+        names.append(row.name)
+        return row
+
+    for func in [reducing_function, non_reducing_function]:
+        del names[:]
+
+        df.apply(func, axis=1)
+        assert names == list(df.index)
+
+
+def test_apply_raw_function_runs_once(engine):
+    # https://github.com/pandas-dev/pandas/issues/34506
+    if engine == "numba":
+        pytest.skip("appending to list outside of numba func is not supported")
+
+    df = DataFrame({"a": [1, 2, 3]})
+    values = []  # Save row values function is applied to
+
+    def reducing_function(row):
+        values.extend(row)
+
+    def non_reducing_function(row):
+        values.extend(row)
+        return row
+
+    for func in [reducing_function, non_reducing_function]:
+        del values[:]
+
+        df.apply(func, engine=engine, raw=True, axis=1)
+        assert values == list(df.a.to_list())
+
+
+def test_apply_with_byte_string():
+    # GH 34529
+    df = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"])
+    expected = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object)
+    # After we make the apply we expect a dataframe just
+    # like the original but with the object datatype
+    result = df.apply(lambda x: x.astype("object"))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("val", ["asd", 12, None, np.nan])
+def test_apply_category_equalness(val):
+    # Check if categorical comparisons on apply, GH 21239
+    df_values = ["asd", None, 12, "asd", "cde", np.nan]
+    df = DataFrame({"a": df_values}, dtype="category")
+
+    result = df.a.apply(lambda x: x == val)
+    expected = Series(
+        [np.nan if pd.isnull(x) else x == val for x in df_values], name="a"
+    )
+    tm.assert_series_equal(result, expected)
+
+
+# the user has supplied an opaque UDF where
+# they are transforming the input that requires
+# us to infer the output
+
+
+def test_infer_row_shape():
+    # GH 17437
+    # if row shape is changing, infer it
+    df = DataFrame(np.random.default_rng(2).random((10, 2)))
+    result = df.apply(np.fft.fft, axis=0).shape
+    assert result == (10, 2)
+
+    result = df.apply(np.fft.rfft, axis=0).shape
+    assert result == (6, 2)
+
+
+@pytest.mark.parametrize(
+    "ops, by_row, expected",
+    [
+        ({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})),
+        ({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})),
+        ({"a": lambda x: x.sum()}, "compat", Series({"a": 3})),
+        ({"a": lambda x: x.sum()}, False, Series({"a": 3})),
+        (
+            {"a": ["sum", np.sum, lambda x: x.sum()]},
+            "compat",
+            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
+        ),
+        (
+            {"a": ["sum", np.sum, lambda x: x.sum()]},
+            False,
+            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
+        ),
+        ({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})),
+        ({"a": lambda x: 1}, False, Series({"a": 1})),
+    ],
+)
+def test_dictlike_lambda(ops, by_row, expected):
+    # GH53601
+    df = DataFrame({"a": [1, 2]})
+    result = df.apply(ops, by_row=by_row)
+    tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "ops",
+    [
+        {"a": lambda x: x + 1},
+        {"a": lambda x: x.sum()},
+        {"a": ["sum", np.sum, lambda x: x.sum()]},
+        {"a": lambda x: 1},
+    ],
+)
+def test_dictlike_lambda_raises(ops):
+    # GH53601
+    df = DataFrame({"a": [1, 2]})
+    with pytest.raises(ValueError, match="by_row=True not allowed"):
+        df.apply(ops, by_row=True)
+
+
+def test_with_dictlike_columns():
+    # GH 17602
+    df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+    result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)
+    expected = Series([{"s": 3} for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+    df["tm"] = [
+        Timestamp("2017-05-01 00:00:00"),
+        Timestamp("2017-05-02 00:00:00"),
+    ]
+    result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)
+    tm.assert_series_equal(result, expected)
+
+    # compose a series
+    result = (df["a"] + df["b"]).apply(lambda x: {"s": x})
+    expected = Series([{"s": 3}, {"s": 3}])
+    tm.assert_series_equal(result, expected)
+
+
+def test_with_dictlike_columns_with_datetime():
+    # GH 18775
+    df = DataFrame()
+    df["author"] = ["X", "Y", "Z"]
+    df["publisher"] = ["BBC", "NBC", "N24"]
+    df["date"] = pd.to_datetime(
+        ["17-10-2010 07:15:30", "13-05-2011 08:20:35", "15-01-2013 09:09:09"],
+        dayfirst=True,
+    )
+    result = df.apply(lambda x: {}, axis=1)
+    expected = Series([{}, {}, {}])
+    tm.assert_series_equal(result, expected)
+
+
+def test_with_dictlike_columns_with_infer():
+    # GH 17602
+    df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+    result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")
+    expected = DataFrame({"s": [3, 3]})
+    tm.assert_frame_equal(result, expected)
+
+    df["tm"] = [
+        Timestamp("2017-05-01 00:00:00"),
+        Timestamp("2017-05-02 00:00:00"),
+    ]
+    result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand")
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "ops, by_row, expected",
+    [
+        ([lambda x: x + 1], "compat", DataFrame({("a", "<lambda>"): [2, 3]})),
+        ([lambda x: x + 1], False, DataFrame({("a", "<lambda>"): [2, 3]})),
+        ([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=["<lambda>"])),
+        ([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=["<lambda>"])),
+        (
+            ["sum", np.sum, lambda x: x.sum()],
+            "compat",
+            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
+        ),
+        (
+            ["sum", np.sum, lambda x: x.sum()],
+            False,
+            DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
+        ),
+        (
+            [lambda x: x + 1, lambda x: 3],
+            "compat",
+            DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["<lambda>", "<lambda>"]]),
+        ),
+        (
+            [lambda x: 2, lambda x: 3],
+            False,
+            DataFrame({"a": [2, 3]}, ["<lambda>", "<lambda>"]),
+        ),
+    ],
+)
+def test_listlike_lambda(ops, by_row, expected):
+    # GH53601
+    df = DataFrame({"a": [1, 2]})
+    result = df.apply(ops, by_row=by_row)
+    tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "ops",
+    [
+        [lambda x: x + 1],
+        [lambda x: x.sum()],
+        ["sum", np.sum, lambda x: x.sum()],
+        [lambda x: x + 1, lambda x: 3],
+    ],
+)
+def test_listlike_lambda_raises(ops):
+    # GH53601
+    df = DataFrame({"a": [1, 2]})
+    with pytest.raises(ValueError, match="by_row=True not allowed"):
+        df.apply(ops, by_row=True)
+
+
+def test_with_listlike_columns():
+    # GH 17348
+    df = DataFrame(
+        {
+            "a": Series(np.random.default_rng(2).standard_normal(4)),
+            "b": ["a", "list", "of", "words"],
+            "ts": date_range("2016-10-01", periods=4, freq="h"),
+        }
+    )
+
+    result = df[["a", "b"]].apply(tuple, axis=1)
+    expected = Series([t[1:] for t in df[["a", "b"]].itertuples()])
+    tm.assert_series_equal(result, expected)
+
+    result = df[["a", "ts"]].apply(tuple, axis=1)
+    expected = Series([t[1:] for t in df[["a", "ts"]].itertuples()])
+    tm.assert_series_equal(result, expected)
+
+
+def test_with_listlike_columns_returning_list():
+    # GH 18919
+    df = DataFrame({"x": Series([["a", "b"], ["q"]]), "y": Series([["z"], ["q", "t"]])})
+    df.index = MultiIndex.from_tuples([("i0", "j0"), ("i1", "j1")])
+
+    result = df.apply(lambda row: [el for el in row["x"] if el in row["y"]], axis=1)
+    expected = Series([[], ["q"]], index=df.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_infer_output_shape_columns():
+    # GH 18573
+
+    df = DataFrame(
+        {
+            "number": [1.0, 2.0],
+            "string": ["foo", "bar"],
+            "datetime": [
+                Timestamp("2017-11-29 03:30:00"),
+                Timestamp("2017-11-29 03:45:00"),
+            ],
+        }
+    )
+    result = df.apply(lambda row: (row.number, row.string), axis=1)
+    expected = Series([(t.number, t.string) for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+
+def test_infer_output_shape_listlike_columns():
+    # GH 16353
+
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((6, 3)), columns=["A", "B", "C"]
+    )
+
+    result = df.apply(lambda x: [1, 2, 3], axis=1)
+    expected = Series([[1, 2, 3] for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+    result = df.apply(lambda x: [1, 2], axis=1)
+    expected = Series([[1, 2] for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+
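+# Editor's note: a short illustration of the shape inference the tests above
+# rely on (not part of the upstream pandas test file). An axis=1 UDF that
+# returns a list-like produces a Series of lists unless expansion is
+# explicitly requested:
+#
+#     df = DataFrame(np.ones((6, 3)), columns=["A", "B", "C"])
+#     df.apply(lambda x: [1, 2, 3], axis=1)                        # Series of lists
+#     df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")  # 6x3 DataFrame
+
+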
+@pytest.mark.parametrize("val", [1, 2])
+def test_infer_output_shape_listlike_columns_np_func(val):
+    # GH 17970
+    df = DataFrame({"a": [1, 2, 3]}, index=list("abc"))
+
+    result = df.apply(lambda row: np.ones(val), axis=1)
+    expected = Series([np.ones(val) for t in df.itertuples()], index=df.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_infer_output_shape_listlike_columns_with_timestamp():
+    # GH 17892
+    df = DataFrame(
+        {
+            "a": [
+                Timestamp("2010-02-01"),
+                Timestamp("2010-02-04"),
+                Timestamp("2010-02-05"),
+                Timestamp("2010-02-06"),
+            ],
+            "b": [9, 5, 4, 3],
+            "c": [5, 3, 4, 2],
+            "d": [1, 2, 3, 4],
+        }
+    )
+
+    def fun(x):
+        return (1, 2)
+
+    result = df.apply(fun, axis=1)
+    expected = Series([(1, 2) for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("lst", [[1, 2, 3], [1, 2]])
+def test_consistent_coerce_for_shapes(lst):
+    # we want column names to NOT be propagated
+    # just because the shape matches the input shape
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"]
+    )
+
+    result = df.apply(lambda x: lst, axis=1)
+    expected = Series([lst for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+
+def test_consistent_names(int_frame_const_col):
+    # if a Series is returned, we should use the resulting index names
+    df = int_frame_const_col
+
+    result = df.apply(
+        lambda x: Series([1, 2, 3], index=["test", "other", "cols"]), axis=1
+    )
+    expected = int_frame_const_col.rename(
+        columns={"A": "test", "B": "other", "C": "cols"}
+    )
+    tm.assert_frame_equal(result, expected)
+
+    result = df.apply(lambda x: Series([1, 2], index=["test", "other"]), axis=1)
+    expected = expected[["test", "other"]]
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type(int_frame_const_col):
+    # result_type should be consistent no matter which
+    # path we take in the code
+    df = int_frame_const_col
+
+    result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand")
+    expected = df.copy()
+    expected.columns = [0, 1, 2]
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type_shorter_list(int_frame_const_col):
+    # result_type should be consistent no matter which
+    # path we take in the code
+    df = int_frame_const_col
+    result = df.apply(lambda x: [1, 2], axis=1, result_type="expand")
+    expected = df[["A", "B"]].copy()
+    expected.columns = [0, 1]
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type_broadcast(int_frame_const_col, request, engine):
+    # result_type should be consistent no matter which
+    # path we take in the code
+    if engine == "numba":
+        mark = pytest.mark.xfail(reason="numba engine doesn't support list return")
+        request.node.add_marker(mark)
+    df = int_frame_const_col
+    # broadcast result
+    result = df.apply(
+        lambda x: [1, 2, 3], axis=1, result_type="broadcast", engine=engine
+    )
+    expected = df.copy()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type_broadcast_series_func(int_frame_const_col, engine, request):
+    # result_type should be consistent no matter which
+    # path we take in the code
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="numba Series constructor only support ndarrays not list data"
+        )
+        request.node.add_marker(mark)
+    df = int_frame_const_col
+    columns = ["other", "col", "names"]
+    result = df.apply(
+        lambda x: Series([1, 2, 3], index=columns),
+        axis=1,
+        result_type="broadcast",
+        engine=engine,
+    )
+    expected = df.copy()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type_series_result(int_frame_const_col, engine, request):
+    # result_type should be consistent no matter which
+    # path we take in the code
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="numba Series constructor only support ndarrays not list data"
+        )
+        request.node.add_marker(mark)
+    df = int_frame_const_col
+    # series result
+    result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1, engine=engine)
+    expected = df.copy()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_result_type_series_result_other_index(int_frame_const_col, engine, request):
+    # result_type should be consistent no matter which
+    # path we take in the code
+
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="no support in numba Series constructor for list of columns"
+        )
+        request.node.add_marker(mark)
+    df = int_frame_const_col
+    # series result with other index
+    columns = ["other", "col", "names"]
+    result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1, engine=engine)
+    expected = df.copy()
+    expected.columns = columns
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "box",
+    [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")],
+    ids=["list", "tuple", "array"],
+)
+def test_consistency_for_boxed(box, int_frame_const_col):
+    # passing an array or list should not affect the output shape
+    df = int_frame_const_col
+
+    result = df.apply(lambda x: box([1, 2]), axis=1)
+    expected = Series([box([1, 2]) for t in df.itertuples()])
+    tm.assert_series_equal(result, expected)
+
+    result = df.apply(lambda x: box([1, 2]), axis=1, result_type="expand")
+    expected = int_frame_const_col[["A", "B"]].rename(columns={"A": 0, "B": 1})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_transform(axis, float_frame):
+    other_axis = 1 if axis in {0, "index"} else 0
+
+    with np.errstate(all="ignore"):
+        f_abs = np.abs(float_frame)
+        f_sqrt = np.sqrt(float_frame)
+
+        # ufunc
+        expected = f_sqrt.copy()
+        result = float_frame.apply(np.sqrt, axis=axis)
+        tm.assert_frame_equal(result, expected)
+
+        # list-like
+        result = float_frame.apply([np.sqrt], axis=axis)
+        expected = f_sqrt.copy()
+        if axis in {0, "index"}:
+            expected.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]])
+        else:
+            expected.index = MultiIndex.from_product([float_frame.index, ["sqrt"]])
+        tm.assert_frame_equal(result, expected)
+
+        # multiple items in list
+        # these are in the order as if we are applying both
+        # functions per series and then concatting
+        result = float_frame.apply([np.abs, np.sqrt], axis=axis)
+        expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
+        if axis in {0, "index"}:
+            expected.columns = MultiIndex.from_product(
+                [float_frame.columns, ["absolute", "sqrt"]]
+            )
+        else:
+            expected.index = MultiIndex.from_product(
+                [float_frame.index, ["absolute", "sqrt"]]
+            )
+        tm.assert_frame_equal(result, expected)
+
+
+def test_demo():
+    # demonstration tests
+    df = DataFrame({"A": range(5), "B": 5})
+
+    result = df.agg(["min", "max"])
+    expected = DataFrame(
+        {"A": [0, 4], "B": [5, 5]}, columns=["A", "B"], index=["min", "max"]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_demo_dict_agg():
+    # demonstration tests
+    df = DataFrame({"A": range(5), "B": 5})
+    result = df.agg({"A": ["min", "max"], "B": ["sum", "max"]})
+    expected = DataFrame(
+        {"A": [4.0, 0.0, np.nan], "B": [5.0, np.nan, 25.0]},
+        columns=["A", "B"],
+        index=["max", "min", "sum"],
+    )
+    tm.assert_frame_equal(result.reindex_like(expected), expected)
+
+
+def test_agg_with_name_as_column_name():
+    # GH 36212 - Column name is "name"
+    data = {"name": ["foo", "bar"]}
+    df = DataFrame(data)
+
+    # result's name should be None
+    result = df.agg({"name": "count"})
+    expected = Series({"name": 2})
+    tm.assert_series_equal(result, expected)
+
+    # Check if name is still preserved when aggregating series instead
+    result = df["name"].agg({"name": "count"})
+    expected = Series({"name": 2}, name="name")
+    tm.assert_series_equal(result, expected)
+
+
+def test_agg_multiple_mixed():
+    # GH 20909
+    mdf = DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+        }
+    )
+    expected = DataFrame(
+        {
+            "A": [1, 6],
+            "B": [1.0, 6.0],
+            "C": ["bar", "foobarbaz"],
+        },
+        index=["min", "sum"],
+    )
+    # sorted index
+    result = mdf.agg(["min", "sum"])
+    tm.assert_frame_equal(result, expected)
+
+    result = mdf[["C", "B", "A"]].agg(["sum", "min"])
+    # GH40420: the result of .agg should have an index that is sorted
+    # according to the arguments provided to agg.
+    expected = expected[["C", "B", "A"]].reindex(["sum", "min"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_multiple_mixed_raises():
+    # GH 20909
+    mdf = DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+            "D": date_range("20130101", periods=3),
+        }
+    )
+
+    # sorted index
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        mdf.agg(["min", "sum"])
+
+    with pytest.raises(TypeError, match=msg):
+        mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
+
+
+def test_agg_reduce(axis, float_frame):
+    other_axis = 1 if axis in {0, "index"} else 0
+    name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()
+
+    # all reducers
+    expected = pd.concat(
+        [
+            float_frame.mean(axis=axis),
+            float_frame.max(axis=axis),
+            float_frame.sum(axis=axis),
+        ],
+        axis=1,
+    )
+    expected.columns = ["mean", "max", "sum"]
+    expected = expected.T if axis in {0, "index"} else expected
+
+    result = float_frame.agg(["mean", "max", "sum"], axis=axis)
+    tm.assert_frame_equal(result, expected)
+
+    # dict input with scalars
+    func = {name1: "mean", name2: "sum"}
+    result = float_frame.agg(func, axis=axis)
+    expected = Series(
+        [
+            float_frame.loc(other_axis)[name1].mean(),
+            float_frame.loc(other_axis)[name2].sum(),
+        ],
+        index=[name1, name2],
+    )
+    tm.assert_series_equal(result, expected)
+
+    # dict input with lists
+    func = {name1: ["mean"], name2: ["sum"]}
+    result = float_frame.agg(func, axis=axis)
+    expected = DataFrame(
+        {
+            name1: Series([float_frame.loc(other_axis)[name1].mean()], index=["mean"]),
+            name2: Series([float_frame.loc(other_axis)[name2].sum()], index=["sum"]),
+        }
+    )
+    expected = expected.T if axis in {1, "columns"} else expected
+    tm.assert_frame_equal(result, expected)
+
+    # dict input with lists with multiple
+    func = {name1: ["mean", "sum"], name2: ["sum", "max"]}
+    result = float_frame.agg(func, axis=axis)
+    expected = pd.concat(
+        {
+            name1: Series(
+                [
+                    float_frame.loc(other_axis)[name1].mean(),
+                    float_frame.loc(other_axis)[name1].sum(),
+                ],
+                index=["mean", "sum"],
+            ),
+            name2: Series(
+                [
+                    float_frame.loc(other_axis)[name2].sum(),
+                    float_frame.loc(other_axis)[name2].max(),
+                ],
+                index=["sum", "max"],
+            ),
+        },
+        axis=1,
+    )
+    expected = expected.T if axis in {1, "columns"} else expected
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nuiscance_columns():
+    # GH 15015
+    df = DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+            "D": date_range("20130101", periods=3),
+        }
+    )
+
+    result = df.agg("min")
+    expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns)
+    tm.assert_series_equal(result, expected)
+
+    result = df.agg(["min"])
+    expected = DataFrame(
+        [[1, 1.0, "bar", Timestamp("20130101").as_unit("ns")]],
+        index=["min"],
+        columns=df.columns,
+    )
+    tm.assert_frame_equal(result, expected)
+
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        df.agg("sum")
+
+    result = df[["A", "B", "C"]].agg("sum")
+    expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
+    tm.assert_series_equal(result, expected)
+
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        df.agg(["sum"])
+
+
+@pytest.mark.parametrize("how", ["agg", "apply"])
+def test_non_callable_aggregates(how):
+    # GH 16405
+    # 'size' is a property of frame/series
+    # validate that this is working
+    # GH 39116 - expand to apply
+    df = DataFrame(
+        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
+    )
+
+    # Function aggregate
+    result = getattr(df, how)({"A": "count"})
+    expected = Series({"A": 2})
+
+    tm.assert_series_equal(result, expected)
+
+    # Non-function aggregate
+    result = getattr(df, how)({"A": "size"})
+    expected = Series({"A": 3})
+
+    tm.assert_series_equal(result, expected)
+
+    # Mix function and non-function aggs
+    result1 = getattr(df, how)(["count", "size"])
+    result2 = getattr(df, how)(
+        {"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]}
+    )
+    expected = DataFrame(
+        {
+            "A": {"count": 2, "size": 3},
+            "B": {"count": 2, "size": 3},
+            "C": {"count": 2, "size": 3},
+        }
+    )
+
+    tm.assert_frame_equal(result1, result2, check_like=True)
+    tm.assert_frame_equal(result2, expected, check_like=True)
+
+    # Just functional string arg is same as calling df.arg()
+    result = getattr(df, how)("count")
+    expected = df.count()
+
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("how", ["agg", "apply"])
+def test_size_as_str(how, axis):
+    # GH 39934
+    df = DataFrame(
+        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
+    )
+    # Just a string attribute arg same as calling df.arg
+    # on the columns
+    result = getattr(df, how)("size", axis=axis)
+    if axis in (0, "index"):
+        expected = Series(df.shape[0], index=df.columns)
+    else:
+        expected = Series(df.shape[1], index=df.index)
+    tm.assert_series_equal(result, expected)
+
+
+def test_agg_listlike_result():
+    # GH-29587 user defined function returning list-likes
+    df = DataFrame({"A": [2, 2, 3], "B": [1.5, np.nan, 1.5], "C": ["foo", None, "bar"]})
+
+    def func(group_col):
+        return list(group_col.dropna().unique())
+
+    result = df.agg(func)
+    expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"])
+    tm.assert_series_equal(result, expected)
+
+    result = df.agg([func])
+    expected = expected.to_frame("func").T
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize(
+    "args, kwargs",
+    [
+        ((1, 2, 3), {}),
+        ((8, 7, 15), {}),
+        ((1, 2), {}),
+        ((1,), {"b": 2}),
+        ((), {"a": 1, "b": 2}),
+        ((), {"a": 2, "b": 1}),
+        ((), {"a": 1, "b": 2, "c": 3}),
+    ],
+)
+def test_agg_args_kwargs(axis, args, kwargs):
+    def f(x, a, b, c=3):
+        return x.sum() + (a + b) / c
+
+    df = DataFrame([[1, 2], [3, 4]])
+
+    if axis == 0:
+        expected = Series([5.0, 7.0])
+    else:
+        expected = Series([4.0, 8.0])
+
+    result = df.agg(f, axis, *args, **kwargs)
+
+    tm.assert_series_equal(result, expected)
+
+
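+# Editor's note: an illustration of the args/kwargs forwarding that
+# test_agg_args_kwargs above verifies (not part of the upstream pandas test
+# file). Positional and keyword arguments given after the function and axis
+# are passed straight through to the aggregating callable:
+#
+#     def f(x, a, b, c=3):
+#         return x.sum() + (a + b) / c
+#
+#     DataFrame([[1, 2], [3, 4]]).agg(f, 0, 1, 2)  # calls f(col, 1, 2) per column
+
+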
+@pytest.mark.parametrize("num_cols", [2, 3, 5])
+def test_frequency_is_original(num_cols, engine, request):
+    # GH 22150
+    if engine == "numba":
+        mark = pytest.mark.xfail(reason="numba engine only supports numeric indices")
+        request.node.add_marker(mark)
+    index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"])
+    original = index.copy()
+    df = DataFrame(1, index=index, columns=range(num_cols))
+    df.apply(lambda x: x, engine=engine)
+    assert index.freq == original.freq
+
+
+def test_apply_datetime_tz_issue(engine, request):
+    # GH 29052
+
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="numba engine doesn't support non-numeric indexes"
+        )
+        request.node.add_marker(mark)
+
+    timestamps = [
+        Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"),
+        Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"),
+        Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"),
+    ]
+    df = DataFrame(data=[0, 1, 2], index=timestamps)
+    result = df.apply(lambda x: x.name, axis=1, engine=engine)
+    expected = Series(index=timestamps, data=timestamps)
+
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})])
+@pytest.mark.parametrize("method", ["min", "max", "sum"])
+def test_mixed_column_raises(df, method, using_infer_string):
+    # GH 16832
+    if method == "sum":
+        msg = r'can only concatenate str \(not "int"\) to str|does not support'
+    else:
+        msg = "not supported between instances of 'str' and 'float'"
+    if not using_infer_string:
+        with pytest.raises(TypeError, match=msg):
+            getattr(df, method)()
+    else:
+        getattr(df, method)()
+
+
+@pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan])
+def test_apply_dtype(col):
+    # GH 31466
+    df = DataFrame([[1.0, col]], columns=["a", "b"])
+    result = df.apply(lambda x: x.dtype)
+    expected = df.dtypes
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_mutating(using_array_manager, using_copy_on_write, warn_copy_on_write):
+    # GH#35462 case where applied func pins a new BlockManager to a row
+    df = DataFrame({"a": range(100), "b": range(100, 200)})
+    df_orig = df.copy()
+
+    def func(row):
+        mgr = row._mgr
+        row.loc["a"] += 1
+        assert row._mgr is not mgr
+        return row
+
+    expected = df.copy()
+    expected["a"] += 1
+
+    with tm.assert_cow_warning(warn_copy_on_write):
+        result = df.apply(func, axis=1)
+
+    tm.assert_frame_equal(result, expected)
+    if using_copy_on_write or using_array_manager:
+        # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent
+        # INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,
+        # with ArrayManager the row is not a view, and thus not mutated in place
+        tm.assert_frame_equal(df, df_orig)
+    else:
+        tm.assert_frame_equal(df, result)
+
+
+def test_apply_empty_list_reduce():
+    # GH#35683 get columns correct
+    df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"])
+
+    result = df.apply(lambda x: [], result_type="reduce")
+    expected = Series({"a": [], "b": []}, dtype=object)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_no_suffix_index(engine, request):
+    # GH36189
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="numba engine doesn't support list-likes/dict-like callables"
+        )
+        request.node.add_marker(mark)
+    pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"])
+    result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], engine=engine)
+    expected = DataFrame(
+        {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_raw_returns_string(engine):
+    # https://github.com/pandas-dev/pandas/issues/35940
+    if engine == "numba":
+        pytest.skip("No object dtype support in numba")
+    df = DataFrame({"A": ["aa", "bbb"]})
+    result = df.apply(lambda x: x[0], engine=engine, axis=1, raw=True)
+    expected = Series(["aa", "bbb"])
+    tm.assert_series_equal(result, expected)
+
+
+def test_aggregation_func_column_order():
+    # GH40420: the result of .agg should have an index that is sorted
+    # according to the arguments provided to agg.
+    df = DataFrame(
+        [
+            (1, 0, 0),
+            (2, 0, 0),
+            (3, 0, 0),
+            (4, 5, 4),
+            (5, 6, 6),
+            (6, 7, 7),
+        ],
+        columns=("att1", "att2", "att3"),
+    )
+
+    def sum_div2(s):
+        return s.sum() / 2
+
+    aggs = ["sum", sum_div2, "count", "min"]
+    result = df.agg(aggs)
+    expected = DataFrame(
+        {
+            "att1": [21.0, 10.5, 6.0, 1.0],
+            "att2": [18.0, 9.0, 6.0, 0.0],
+            "att3": [17.0, 8.5, 6.0, 0.0],
+        },
+        index=["sum", "sum_div2", "count", "min"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_apply_getitem_axis_1(engine, request):
+    # GH 13427
+    if engine == "numba":
+        mark = pytest.mark.xfail(
+            reason="numba engine not supporting duplicate index values"
+        )
+        request.node.add_marker(mark)
+    df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]})
+    result = df[["a", "a"]].apply(
+        lambda x: x.iloc[0] + x.iloc[1], axis=1, engine=engine
+    )
+    expected = Series([0, 2, 4])
+    tm.assert_series_equal(result, expected)
+
+
+def test_nuisance_depr_passes_through_warnings():
+    # GH 43740
+    # DataFrame.agg with list-likes may emit warnings for both individual
+    # args and for entire columns, but we only want to emit once. We
+    # catch and suppress the warnings for individual args, but need to make
+    # sure if some other warnings were raised, they get passed through to
+    # the user.
+
+    def expected_warning(x):
+        warnings.warn("Hello, World!")
+        return x.sum()
+
+    df = DataFrame({"a": [1, 2, 3]})
+    with tm.assert_produces_warning(UserWarning, match="Hello, World!"):
+        df.agg([expected_warning])
+
+
+def test_apply_type():
+    # GH 46719
+    df = DataFrame(
+        {"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]},
+        index=["a", "b", "c"],
+    )
+
+    # axis=0
+    result = df.apply(type, axis=0)
+    expected = Series({"col1": Series, "col2": Series})
+    tm.assert_series_equal(result, expected)
+
+    # axis=1
+    result = df.apply(type, axis=1)
+    expected = Series({"a": Series, "b": Series, "c": Series})
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_on_empty_dataframe(engine):
+    # GH 39111
+    df = DataFrame({"a": [1, 2], "b": [3, 0]})
+    result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1, engine=engine)
+    expected = Series([], dtype=np.float64)
+    tm.assert_series_equal(result, expected)
+
+
+def test_apply_return_list():
+    df = DataFrame({"a": [1, 2], "b": [2, 3]})
+    result = df.apply(lambda x: [x.values])
+    expected = DataFrame({"a": [[1, 2]], "b": [[2, 3]]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "test, constant",
+    [
+        ({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}),
+        ({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}),
+    ],
+)
+def test_unique_agg_type_is_series(test, constant):
+    # GH#22558
+    df1 = DataFrame(test)
+    expected = Series(data=constant, index=["a", "b"], dtype="object")
+    aggregation = {"a": "unique", "b": "unique"}
+
+    result = df1.agg(aggregation)
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_any_apply_keyword_non_zero_axis_regression():
+    # https://github.com/pandas-dev/pandas/issues/48656
+    df = DataFrame({"A": [1, 2, 0], "B": [0, 2, 0], "C": [0, 0, 0]})
+    expected = Series([True, True, False])
+    tm.assert_series_equal(df.any(axis=1), expected)
+
+    result = df.apply("any", axis=1)
+    tm.assert_series_equal(result, expected)
+
+    result = df.apply("any", 1)
+    tm.assert_series_equal(result, expected)
+
+
+def test_agg_mapping_func_deprecated():
+    # GH 53325
+    df = DataFrame({"x": [1, 2, 3]})
+
+    def foo1(x, a=1, c=0):
+        return x + a + c
+
+    def foo2(x, b=2, c=0):
+        return x + b + c
+
+    # single func already takes the vectorized path
+    result = df.agg(foo1, 0, 3, c=4)
+    expected = df + 7
+    tm.assert_frame_equal(result, expected)
+
+    msg = "using .+ in Series.agg cannot aggregate and"
+
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = df.agg([foo1, foo2], 0, 3, c=4)
+    expected = DataFrame(
+        [[8, 8], [9, 9], [10, 10]], columns=[["x", "x"], ["foo1", "foo2"]]
+    )
+    tm.assert_frame_equal(result, expected)
+
+    # TODO: the result below is wrong, should be fixed (GH53325)
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = df.agg({"x": foo1}, 0, 3, c=4)
+    expected = DataFrame([2, 3, 4], columns=["x"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_std():
+    df = DataFrame(np.arange(6).reshape(3, 2), columns=["A", "B"])
+
+    with tm.assert_produces_warning(FutureWarning, match="using DataFrame.std"):
+        result = df.agg(np.std)
+    expected = Series({"A": 2.0, "B": 2.0}, dtype=float)
+    tm.assert_series_equal(result, expected)
+
+    with tm.assert_produces_warning(FutureWarning, match="using Series.std"):
+        result = df.agg([np.std])
+    expected = DataFrame({"A": 2.0, "B": 2.0}, index=["std"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_dist_like_and_nonunique_columns():
+    # GH#51099
+    df = DataFrame(
+ {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]} + ) + df.columns = ["A", "A", "C"] + + result = df.agg({"A": "count"}) + expected = df["A"].count() + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py new file mode 100644 index 0000000000000000000000000000000000000000..723bdd349c0cb8a8f3fe73ded665b6d22260ffb5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py @@ -0,0 +1,113 @@ +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +import pandas as pd +import pandas._testing as tm + + +def test_agg_relabel(): + # GH 26513 + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + + # simplest case with one column, one func + result = df.agg(foo=("B", "sum")) + expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"])) + tm.assert_frame_equal(result, expected) + + # test on same column with different methods + result = df.agg(foo=("B", "sum"), bar=("B", "min")) + expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"])) + + tm.assert_frame_equal(result, expected) + + +def test_agg_relabel_multi_columns_multi_methods(): + # GH 26513, test on multiple columns with multiple methods + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + result = df.agg( + foo=("A", "sum"), + bar=("B", "mean"), + cat=("A", "min"), + dat=("B", "max"), + f=("A", "max"), + g=("C", "min"), + ) + expected = pd.DataFrame( + { + "A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan], + "B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min") +def test_agg_relabel_partial_functions(): + # GH 26513, test on partial, functools or more complex cases + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + msg = "using Series.[mean|min]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min)) + expected = pd.DataFrame( + {"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"]) + ) + tm.assert_frame_equal(result, expected) + + msg = "using Series.[mean|min|max|sum]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg( + foo=("A", min), + bar=("A", np.min), + cat=("B", max), + dat=("C", "min"), + f=("B", np.sum), + kk=("B", lambda x: min(x)), + ) + expected = pd.DataFrame( + { + "A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0], + "C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_namedtuple(): + # GH 26513 + df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + result = df.agg( + foo=pd.NamedAgg("B", "sum"), + bar=pd.NamedAgg("B", "min"), + cat=pd.NamedAgg(column="B", aggfunc="count"), + fft=pd.NamedAgg("B", aggfunc="max"), + ) + + expected = pd.DataFrame( + {"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"]) + ) + tm.assert_frame_equal(result, expected) + + result = df.agg( + foo=pd.NamedAgg("A", "min"), + 
bar=pd.NamedAgg(column="B", aggfunc="max"), + cat=pd.NamedAgg(column="A", aggfunc="max"), + ) + expected = pd.DataFrame( + {"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]}, + index=pd.Index(["foo", "bar", "cat"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_reconstruct_func(): + # GH 28472, test to ensure reconstruct_func isn't moved; + # This method is used by other libraries (e.g. dask) + result = pd.core.apply.reconstruct_func("min") + expected = (False, "min", None, None) + tm.assert_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..558d76ae8fdc4b95d46bbe94e15822779bd7c53f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_frame_transform.py @@ -0,0 +1,264 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.tests.apply.common import frame_transform_kernels +from pandas.tests.frame.common import zip_frames + + +def unpack_obj(obj, klass, axis): + """ + Helper to ensure we have the right type of object for a test parametrized + over frame_or_series. + """ + if klass is not DataFrame: + obj = obj["A"] + if axis != 0: + pytest.skip(f"Test is only for DataFrame with axis={axis}") + return obj + + +def test_transform_ufunc(axis, float_frame, frame_or_series): + # GH 35964 + obj = unpack_obj(float_frame, frame_or_series, axis) + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(obj) + + # ufunc + result = obj.transform(np.sqrt, axis=axis) + expected = f_sqrt + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_transform_listlike(axis, float_frame, ops, names): + # GH 35964 + other_axis = 1 if axis in {0, "index"} else 0 + with np.errstate(all="ignore"): + expected = zip_frames([op(float_frame) for op in ops], axis=other_axis) + if axis in {0, "index"}: + expected.columns = MultiIndex.from_product([float_frame.columns, names]) + else: + expected.index = MultiIndex.from_product([float_frame.index, names]) + result = float_frame.transform(ops, axis=axis) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("ops", [[], np.array([])]) +def test_transform_empty_listlike(float_frame, ops, frame_or_series): + obj = unpack_obj(float_frame, frame_or_series, 0) + + with pytest.raises(ValueError, match="No transform functions were provided"): + obj.transform(ops) + + +def test_transform_listlike_func_with_args(): + # GH 50624 + df = DataFrame({"x": [1, 2, 3]}) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = r"foo1\(\) got an unexpected keyword argument 'b'" + with pytest.raises(TypeError, match=msg): + df.transform([foo1, foo2], 0, 3, b=3, c=4) + + result = df.transform([foo1, foo2], 0, 3, c=4) + expected = DataFrame( + [[8, 8], [9, 9], [10, 10]], + columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("box", [dict, Series]) +def test_transform_dictlike(axis, float_frame, box): + # GH 35964 + if axis in (0, "index"): + e = float_frame.columns[0] + expected = 
float_frame[[e]].transform(np.abs) + else: + e = float_frame.index[0] + expected = float_frame.iloc[[0]].transform(np.abs) + result = float_frame.transform(box({e: np.abs}), axis=axis) + tm.assert_frame_equal(result, expected) + + +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]}) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {}, + {"A": []}, + {"A": [], "B": "cumsum"}, + {"A": "cumsum", "B": []}, + {"A": [], "B": ["cumsum"]}, + {"A": ["cumsum"], "B": []}, + ], +) +def test_transform_empty_dictlike(float_frame, ops, frame_or_series): + obj = unpack_obj(float_frame, frame_or_series, 0) + + with pytest.raises(ValueError, match="No transform functions were provided"): + obj.transform(ops) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_udf(axis, float_frame, use_apply, frame_or_series): + # GH 35964 + obj = unpack_obj(float_frame, frame_or_series, axis) + + # transform uses UDF either via apply or passing the entire DataFrame + def func(x): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, frame_or_series): + # Force transform to fallback + raise ValueError + return x + 1 + + result = obj.transform(func, axis=axis) + expected = obj + 1 + tm.assert_equal(result, expected) + + +wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"] +frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail] + + +@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1]) +def test_transform_bad_dtype(op, frame_or_series, request): + # GH 35964 + if op == "ngroup": + request.applymarker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms + obj = tm.get_obj(obj, frame_or_series) + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + obj.transform(op) + with pytest.raises(error, match=msg): + obj.transform([op]) + with pytest.raises(error, match=msg): + obj.transform({"A": op}) + with pytest.raises(error, match=msg): + obj.transform({"A": [op]}) + + +@pytest.mark.parametrize("op", frame_kernels_raise) +def test_transform_failure_typeerror(request, op): + # GH 35964 + + if op == "ngroup": + request.applymarker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + # Using object makes most transform kernels fail + df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]}) + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + df.transform([op]) + + with pytest.raises(error, match=msg): + df.transform({"A": op, "B": op}) + + with pytest.raises(error, match=msg): + df.transform({"A": [op], "B": [op]}) + + with pytest.raises(error, match=msg): + df.transform({"A": [op, "shift"], "B": [op]}) + + +def test_transform_failure_valueerror(): + # GH 40211 + def op(x): + if np.sum(np.sum(x)) < 10: + raise ValueError + return x + + df = DataFrame({"A": [1, 2, 3], "B": [400, 
500, 600]}) + msg = "Transform function failed" + + with pytest.raises(ValueError, match=msg): + df.transform([op]) + + with pytest.raises(ValueError, match=msg): + df.transform({"A": op, "B": op}) + + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op], "B": [op]}) + + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op, "shift"], "B": [op]}) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_passes_args(use_apply, frame_or_series): + # GH 35964 + # transform uses UDF either via apply or passing the entire DataFrame + expected_args = [1, 2] + expected_kwargs = {"c": 3} + + def f(x, a, b, c): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, frame_or_series): + # Force transform to fallback + raise ValueError + assert [a, b] == expected_args + assert c == expected_kwargs["c"] + return x + + frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs) + + +def test_transform_empty_dataframe(): + # https://github.com/pandas-dev/pandas/issues/39636 + df = DataFrame([], columns=["col1", "col2"]) + result = df.transform(lambda x: x + 10) + tm.assert_frame_equal(result, df) + + result = df["col1"].transform(lambda x: x + 10) + tm.assert_series_equal(result, df["col1"]) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ad1094f5bf5a839600457e133aea1540655cc3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_invalid_arg.py @@ -0,0 +1,361 @@ +# Tests specifically aimed at detecting bad arguments. +# This file is organized by reason for exception. +# 1. always invalid argument values +# 2. missing column(s) +# 3. incompatible ops/dtype/args/kwargs +# 4. invalid result shape/type +# If your test does not fit into one of these categories, add to this list. 
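The failure categories listed in the header comment above can each be reproduced in a line or two; here is a standalone sketch (throwaway frame, values illustrative) of categories 1 and 2, matching the error messages asserted in the tests that follow.

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})

# 1. an always-invalid argument value: a DataFrame only has axes 0 and 1
try:
    df.apply(lambda x: x, axis=2)
except ValueError as err:
    print(err)  # No axis named 2 for object type DataFrame

# 2. a missing column named in a dict-like func
try:
    df.agg({"C": "sum"})
except KeyError as err:
    print(err)  # "Column(s) ['C'] do not exist"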
+ +from itertools import chain +import re + +import numpy as np +import pytest + +from pandas.errors import SpecificationError + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("result_type", ["foo", 1]) +def test_result_type_error(result_type): + # allowed result_type + df = DataFrame( + np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1, + columns=["A", "B", "C"], + ) + + msg = ( + "invalid value for result_type, must be one of " + "{None, 'reduce', 'broadcast', 'expand'}" + ) + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type) + + +def test_apply_invalid_axis_value(): + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"]) + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: x, 2) + + +def test_agg_raises(): + # GH 26513 + df = DataFrame({"A": [0, 1], "B": [1, 2]}) + msg = "Must provide" + + with pytest.raises(TypeError, match=msg): + df.agg() + + +def test_map_with_invalid_na_action_raises(): + # https://github.com/pandas-dev/pandas/issues/32815 + s = Series([1, 2, 3]) + msg = "na_action must either be 'ignore' or None" + with pytest.raises(ValueError, match=msg): + s.map(lambda x: x, na_action="____") + + +@pytest.mark.parametrize("input_na_action", ["____", True]) +def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action): + # https://github.com/pandas-dev/pandas/issues/46588 + s = Series([1, 2, 3]) + msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed" + with pytest.raises(ValueError, match=msg): + s.map({1: 2}, na_action=input_na_action) + + +@pytest.mark.parametrize("method", ["apply", "agg", "transform"]) +@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}]) +def test_nested_renamer(frame_or_series, method, func): + # GH 35964 + obj = frame_or_series({"A": [1]}) + match = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=match): + getattr(obj, method)(func) + + +@pytest.mark.parametrize( + "renamer", + [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}], +) +def test_series_nested_renamer(renamer): + s = Series(range(6), dtype="int64", name="series") + msg = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + s.agg(renamer) + + +def test_apply_dict_depr(): + tsdf = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + columns=["A", "B", "C"], + index=date_range("1/1/2000", periods=10), + ) + msg = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + tsdf.A.agg({"foo": ["sum", "mean"]}) + + +@pytest.mark.parametrize("method", ["agg", "transform"]) +def test_dict_nested_renaming_depr(method): + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + +@pytest.mark.parametrize("method", ["apply", "agg", "transform"]) +@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}]) +def test_missing_column(method, func): + # GH 40004 + obj = DataFrame({"A": [1]}) + match = re.escape("Column(s) ['B'] do not exist") + with pytest.raises(KeyError, match=match): + getattr(obj, method)(func) + + +def test_transform_mixed_column_name_dtypes(): + # GH39025 + df = DataFrame({"a": ["1"]}) + msg = 
r"Column\(s\) \[1, 'b'\] do not exist" + with pytest.raises(KeyError, match=msg): + df.transform({"a": int, 1: str, "b": int}) + + +@pytest.mark.parametrize( + "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)] +) +def test_apply_str_axis_1_raises(how, args): + # GH 39211 - some ops don't support axis=1 + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + msg = f"Operation {how} does not support axis=1" + with pytest.raises(ValueError, match=msg): + df.apply(how, axis=1, args=args) + + +def test_transform_axis_1_raises(): + # GH 35964 + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + Series([1]).transform("sum", axis=1) + + +def test_apply_modify_traceback(): + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + data.loc[4, "C"] = np.nan + + def transform(row): + if row["C"].startswith("shin") and row["A"] == "foo": + row["D"] = 7 + return row + + msg = "'float' object has no attribute 'startswith'" + with pytest.raises(AttributeError, match=msg): + data.apply(transform, axis=1) + + +@pytest.mark.parametrize( + "df, func, expected", + tm.get_cython_table_params( + DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]] + ), +) +def test_agg_cython_table_raises_frame(df, func, expected, axis, using_infer_string): + # GH 21224 + if using_infer_string: + import pyarrow as pa + + expected = (expected, pa.lib.ArrowNotImplementedError) + + msg = "can't multiply sequence by non-int of type 'str'|has no kernel" + warn = None if isinstance(func, str) else FutureWarning + with pytest.raises(expected, match=msg): + with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"): + df.agg(func, axis=axis) + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series("a b c".split()), + [ + ("mean", TypeError), # mean raises TypeError + ("prod", TypeError), + ("std", TypeError), + ("var", TypeError), + ("median", TypeError), + ("cumprod", TypeError), + ], + ) + ), +) +def test_agg_cython_table_raises_series(series, func, expected, using_infer_string): + # GH21224 + msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type" + if func == "median" or func is np.nanmedian or func is np.median: + msg = r"Cannot convert \['a' 'b' 'c'\] to numeric" + + if using_infer_string: + import pyarrow as pa + + expected = (expected, pa.lib.ArrowNotImplementedError) + + msg = msg + "|does not support|has no kernel" + warn = None if isinstance(func, str) else FutureWarning + + with pytest.raises(expected, match=msg): + # e.g. 
Series('a b'.split()).cumprod() will raise + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + series.agg(func) + + +def test_agg_none_to_type(): + # GH 40543 + df = DataFrame({"a": [None]}) + msg = re.escape("int() argument must be a string") + with pytest.raises(TypeError, match=msg): + df.agg({"a": lambda x: int(x.iloc[0])}) + + +def test_transform_none_to_type(): + # GH#34377 + df = DataFrame({"a": [None]}) + msg = "argument must be a" + with pytest.raises(TypeError, match=msg): + df.transform({"a": lambda x: int(x.iloc[0])}) + + +@pytest.mark.parametrize( + "func", + [ + lambda x: np.array([1, 2]).reshape(-1, 2), + lambda x: [1, 2], + lambda x: Series([1, 2]), + ], +) +def test_apply_broadcast_error(func): + df = DataFrame( + np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1, + columns=["A", "B", "C"], + ) + + # > 1 ndim + msg = "too many dims to broadcast|cannot broadcast result" + with pytest.raises(ValueError, match=msg): + df.apply(func, axis=1, result_type="broadcast") + + +def test_transform_and_agg_err_agg(axis, float_frame): + # cannot both transform and agg + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + float_frame.agg(["max", "sqrt"], axis=axis) + + +@pytest.mark.filterwarnings("ignore::FutureWarning") # GH53325 +@pytest.mark.parametrize( + "func, msg", + [ + (["sqrt", "max"], "cannot combine transform and aggregation"), + ( + {"foo": np.sqrt, "bar": "sum"}, + "cannot perform both aggregation and transformation", + ), + ], +) +def test_transform_and_agg_err_series(string_series, func, msg): + # we are trying to transform with an aggregator + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.agg(func) + + +@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]]) +def test_transform_wont_agg_frame(axis, float_frame, func): + # GH 35964 + # cannot both transform and agg + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(func, axis=axis) + + +@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]]) +def test_transform_wont_agg_series(string_series, func): + # GH 35964 + # we are trying to transform with an aggregator + msg = "Function did not transform" + + with pytest.raises(ValueError, match=msg): + string_series.transform(func) + + +@pytest.mark.parametrize( + "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}] +) +def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper): + # GH 35964 + op = op_wrapper(all_reductions) + + obj = DataFrame({"A": [1, 2, 3]}) + obj = tm.get_obj(obj, frame_or_series) + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + obj.transform(op) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py new file mode 100644 index 0000000000000000000000000000000000000000..57b81711ddb48a7390d33b4624f9956fbcd473a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_numba.py @@ -0,0 +1,118 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + +pytestmark = [td.skip_if_no("numba"), pytest.mark.single_cpu] + + +@pytest.fixture(params=[0, 1]) +def apply_axis(request): + return 
request.param + + +def test_numba_vs_python_noop(float_frame, apply_axis): + func = lambda x: x + result = float_frame.apply(func, engine="numba", axis=apply_axis) + expected = float_frame.apply(func, engine="python", axis=apply_axis) + tm.assert_frame_equal(result, expected) + + +def test_numba_vs_python_string_index(): + # GH#56189 + pytest.importorskip("pyarrow") + df = DataFrame( + 1, + index=Index(["a", "b"], dtype="string[pyarrow_numpy]"), + columns=Index(["x", "y"], dtype="string[pyarrow_numpy]"), + ) + func = lambda x: x + result = df.apply(func, engine="numba", axis=0) + expected = df.apply(func, engine="python", axis=0) + tm.assert_frame_equal( + result, expected, check_column_type=False, check_index_type=False + ) + + +def test_numba_vs_python_indexing(): + frame = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7.0, 8.0, 9.0]}, + index=Index(["A", "B", "C"]), + ) + row_func = lambda x: x["c"] + result = frame.apply(row_func, engine="numba", axis=1) + expected = frame.apply(row_func, engine="python", axis=1) + tm.assert_series_equal(result, expected) + + col_func = lambda x: x["A"] + result = frame.apply(col_func, engine="numba", axis=0) + expected = frame.apply(col_func, engine="python", axis=0) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "reduction", + [lambda x: x.mean(), lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()], +) +def test_numba_vs_python_reductions(reduction, apply_axis): + df = DataFrame(np.ones((4, 4), dtype=np.float64)) + result = df.apply(reduction, engine="numba", axis=apply_axis) + expected = df.apply(reduction, engine="python", axis=apply_axis) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("colnames", [[1, 2, 3], [1.0, 2.0, 3.0]]) +def test_numba_numeric_colnames(colnames): + # Check that numeric column names lower properly and can be indxed on + df = DataFrame( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=colnames + ) + first_col = colnames[0] + f = lambda x: x[first_col] # Get the first column + result = df.apply(f, engine="numba", axis=1) + expected = df.apply(f, engine="python", axis=1) + tm.assert_series_equal(result, expected) + + +def test_numba_parallel_unsupported(float_frame): + f = lambda x: x + with pytest.raises( + NotImplementedError, + match="Parallel apply is not supported when raw=False and engine='numba'", + ): + float_frame.apply(f, engine="numba", engine_kwargs={"parallel": True}) + + +def test_numba_nonunique_unsupported(apply_axis): + f = lambda x: x + df = DataFrame({"a": [1, 2]}, index=Index(["a", "a"])) + with pytest.raises( + NotImplementedError, + match="The index/columns must be unique when raw=False and engine='numba'", + ): + df.apply(f, engine="numba", axis=apply_axis) + + +def test_numba_unsupported_dtypes(apply_axis): + f = lambda x: x + df = DataFrame({"a": [1, 2], "b": ["a", "b"], "c": [4, 5]}) + df["c"] = df["c"].astype("double[pyarrow]") + + with pytest.raises( + ValueError, + match="Column b must have a numeric dtype. 
Found 'object|string' instead", + ): + df.apply(f, engine="numba", axis=apply_axis) + + with pytest.raises( + ValueError, + match="Column c is backed by an extension array, " + "which is not supported by the numba engine.", + ): + df["c"].to_frame().apply(f, engine="numba", axis=apply_axis) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..df24fa08f48e1530cd3b3abca708a0d7743cd01d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply.py @@ -0,0 +1,701 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + concat, + date_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.apply.common import series_transform_kernels + + +@pytest.fixture(params=[False, "compat"]) +def by_row(request): + return request.param + + +def test_series_map_box_timedelta(by_row): + # GH#11349 + ser = Series(timedelta_range("1 day 1 s", periods=3, freq="h")) + + def f(x): + return x.total_seconds() if by_row else x.dt.total_seconds() + + result = ser.apply(f, by_row=by_row) + + expected = ser.map(lambda x: x.total_seconds()) + tm.assert_series_equal(result, expected) + + expected = Series([86401.0, 90001.0, 93601.0]) + tm.assert_series_equal(result, expected) + + +def test_apply(datetime_series, by_row): + result = datetime_series.apply(np.sqrt, by_row=by_row) + with np.errstate(all="ignore"): + expected = np.sqrt(datetime_series) + tm.assert_series_equal(result, expected) + + # element-wise apply (ufunc) + result = datetime_series.apply(np.exp, by_row=by_row) + expected = np.exp(datetime_series) + tm.assert_series_equal(result, expected) + + # empty series + s = Series(dtype=object, name="foo", index=Index([], name="bar")) + rs = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(s, rs) + + # check all metadata (GH 9322) + assert s is not rs + assert s.index is rs.index + assert s.dtype == rs.dtype + assert s.name == rs.name + + # index but no data + s = Series(index=[1, 2, 3], dtype=np.float64) + rs = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(s, rs) + + +def test_apply_map_same_length_inference_bug(): + s = Series([1, 2]) + + def f(x): + return (x, x + 1) + + result = s.apply(f, by_row="compat") + expected = s.map(f) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("convert_dtype", [True, False]) +def test_apply_convert_dtype_deprecated(convert_dtype): + ser = Series(np.random.default_rng(2).standard_normal(10)) + + def func(x): + return x if x > 0 else np.nan + + with tm.assert_produces_warning(FutureWarning): + ser.apply(func, convert_dtype=convert_dtype, by_row="compat") + + +def test_apply_args(): + s = Series(["foo,bar"]) + + result = s.apply(str.split, args=(",",)) + assert result[0] == ["foo", "bar"] + assert isinstance(result[0], list) + + +@pytest.mark.parametrize( + "args, kwargs, increment", + [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)], +) +def test_agg_args(args, kwargs, increment): + # GH 43357 + def f(x, a=0, b=0, c=0): + return x + a + 10 * b + 100 * c + + s = Series([1, 2]) + msg = ( + "in Series.agg cannot aggregate and has been deprecated. " + "Use Series.transform to keep behavior unchanged." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.agg(f, 0, *args, **kwargs) + expected = s + increment + tm.assert_series_equal(result, expected) + + +def test_agg_mapping_func_deprecated(): + # GH 53325 + s = Series([1, 2, 3]) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = "using .+ in Series.agg cannot aggregate and" + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg(foo1, 0, 3, c=4) + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg([foo1, foo2], 0, 3, c=4) + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg({"a": foo1, "b": foo2}, 0, 3, c=4) + + +def test_series_apply_map_box_timestamps(by_row): + # GH#2689, GH#2627 + ser = Series(date_range("1/1/2000", periods=10)) + + def func(x): + return (x.hour, x.day, x.month) + + if not by_row: + msg = "Series' object has no attribute 'hour'" + with pytest.raises(AttributeError, match=msg): + ser.apply(func, by_row=by_row) + return + + result = ser.apply(func, by_row=by_row) + expected = ser.map(func) + tm.assert_series_equal(result, expected) + + +def test_apply_box_dt64(): + # ufunc will not be boxed. Same test cases as the test_map_box + vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")] + ser = Series(vals, dtype="M8[ns]") + assert ser.dtype == "datetime64[ns]" + # boxed value must be Timestamp instance + res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat") + exp = Series(["Timestamp_1_None", "Timestamp_2_None"]) + tm.assert_series_equal(res, exp) + + +def test_apply_box_dt64tz(): + vals = [ + pd.Timestamp("2011-01-01", tz="US/Eastern"), + pd.Timestamp("2011-01-02", tz="US/Eastern"), + ] + ser = Series(vals, dtype="M8[ns, US/Eastern]") + assert ser.dtype == "datetime64[ns, US/Eastern]" + res = ser.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat") + exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"]) + tm.assert_series_equal(res, exp) + + +def test_apply_box_td64(): + # timedelta + vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")] + ser = Series(vals) + assert ser.dtype == "timedelta64[ns]" + res = ser.apply(lambda x: f"{type(x).__name__}_{x.days}", by_row="compat") + exp = Series(["Timedelta_1", "Timedelta_2"]) + tm.assert_series_equal(res, exp) + + +def test_apply_box_period(): + # period + vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")] + ser = Series(vals) + assert ser.dtype == "Period[M]" + res = ser.apply(lambda x: f"{type(x).__name__}_{x.freqstr}", by_row="compat") + exp = Series(["Period_M", "Period_M"]) + tm.assert_series_equal(res, exp) + + +def test_apply_datetimetz(by_row): + values = date_range("2011-01-01", "2011-01-02", freq="h").tz_localize("Asia/Tokyo") + s = Series(values, name="XX") + + result = s.apply(lambda x: x + pd.offsets.Day(), by_row=by_row) + exp_values = date_range("2011-01-02", "2011-01-03", freq="h").tz_localize( + "Asia/Tokyo" + ) + exp = Series(exp_values, name="XX") + tm.assert_series_equal(result, exp) + + result = s.apply(lambda x: x.hour if by_row else x.dt.hour, by_row=by_row) + exp = Series(list(range(24)) + [0], name="XX", dtype="int64" if by_row else "int32") + tm.assert_series_equal(result, exp) + + # not vectorized + def f(x): + return str(x.tz) if by_row else str(x.dt.tz) + + result = s.apply(f, by_row=by_row) + if by_row: + exp = Series(["Asia/Tokyo"] * 25, name="XX") + tm.assert_series_equal(result, exp) + else: + assert result == "Asia/Tokyo" + + +def 
test_apply_categorical(by_row, using_infer_string): + values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True) + ser = Series(values, name="XX", index=list("abcdefg")) + + if not by_row: + msg = "Series' object has no attribute 'lower" + with pytest.raises(AttributeError, match=msg): + ser.apply(lambda x: x.lower(), by_row=by_row) + assert ser.apply(lambda x: "A", by_row=by_row) == "A" + return + + result = ser.apply(lambda x: x.lower(), by_row=by_row) + + # should be categorical dtype when the number of categories are + # the same + values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True) + exp = Series(values, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + tm.assert_categorical_equal(result.values, exp.values) + + result = ser.apply(lambda x: "A") + exp = Series(["A"] * 7, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + assert result.dtype == object if not using_infer_string else "string[pyarrow_numpy]" + + +@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]]) +def test_apply_categorical_with_nan_values(series, by_row): + # GH 20714 bug fixed in: GH 24275 + s = Series(series, dtype="category") + if not by_row: + msg = "'Series' object has no attribute 'split'" + with pytest.raises(AttributeError, match=msg): + s.apply(lambda x: x.split("-")[0], by_row=by_row) + return + + result = s.apply(lambda x: x.split("-")[0], by_row=by_row) + result = result.astype(object) + expected = Series(["1", "1", np.nan], dtype="category") + expected = expected.astype(object) + tm.assert_series_equal(result, expected) + + +def test_apply_empty_integer_series_with_datetime_index(by_row): + # GH 21245 + s = Series([], index=date_range(start="2018-01-01", periods=0), dtype=int) + result = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(result, s) + + +def test_apply_dataframe_iloc(): + uintDF = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"]) + indexDF = DataFrame([2, 3, 2, 1, 2], columns=["Indices"]) + + def retrieve(targetRow, targetDF): + val = targetDF["Numbers"].iloc[targetRow] + return val + + result = indexDF["Indices"].apply(retrieve, args=(uintDF,)) + expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64") + tm.assert_series_equal(result, expected) + + +def test_transform(string_series, by_row): + # transforming functions + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(string_series) + f_abs = np.abs(string_series) + + # ufunc + result = string_series.apply(np.sqrt, by_row=by_row) + expected = f_sqrt.copy() + tm.assert_series_equal(result, expected) + + # list-like + result = string_series.apply([np.sqrt], by_row=by_row) + expected = f_sqrt.to_frame().copy() + expected.columns = ["sqrt"] + tm.assert_frame_equal(result, expected) + + result = string_series.apply(["sqrt"], by_row=by_row) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = concat([f_sqrt, f_abs], axis=1) + expected.columns = ["sqrt", "absolute"] + result = string_series.apply([np.sqrt, np.abs], by_row=by_row) + tm.assert_frame_equal(result, expected) + + # dict, provide renaming + expected = concat([f_sqrt, f_abs], axis=1) + expected.columns = ["foo", "bar"] + expected = expected.unstack().rename("series") + + result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row) + tm.assert_series_equal(result.reindex_like(expected), 
expected) + + +@pytest.mark.parametrize("op", series_transform_kernels) +def test_transform_partial_failure(op, request): + # GH 35964 + if op in ("ffill", "bfill", "pad", "backfill", "shift"): + request.applymarker( + pytest.mark.xfail(reason=f"{op} is successful on any dtype") + ) + + # Using object makes most transform kernels fail + ser = Series(3 * [object]) + + if op in ("fillna", "ngroup"): + error = ValueError + msg = "Transform function failed" + else: + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + ser.transform([op, "shift"]) + + with pytest.raises(error, match=msg): + ser.transform({"A": op, "B": "shift"}) + + with pytest.raises(error, match=msg): + ser.transform({"A": [op], "B": ["shift"]}) + + with pytest.raises(error, match=msg): + ser.transform({"A": [op, "shift"], "B": [op]}) + + +def test_transform_partial_failure_valueerror(): + # GH 40211 + def noop(x): + return x + + def raising_op(_): + raise ValueError + + ser = Series(3 * [object]) + msg = "Transform function failed" + + with pytest.raises(ValueError, match=msg): + ser.transform([noop, raising_op]) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": raising_op, "B": noop}) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": [raising_op], "B": [noop]}) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": [noop, raising_op], "B": [noop]}) + + +def test_demo(): + # demonstration tests + s = Series(range(6), dtype="int64", name="series") + + result = s.agg(["min", "max"]) + expected = Series([0, 5], index=["min", "max"], name="series") + tm.assert_series_equal(result, expected) + + result = s.agg({"foo": "min"}) + expected = Series([0], index=["foo"], name="series") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", [str, lambda x: str(x)]) +def test_apply_map_evaluate_lambdas_the_same(string_series, func, by_row): + # test that we are evaluating row-by-row first if by_row="compat" + # else vectorized evaluation + result = string_series.apply(func, by_row=by_row) + + if by_row: + expected = string_series.map(func) + tm.assert_series_equal(result, expected) + else: + assert result == str(string_series) + + +def test_agg_evaluate_lambdas(string_series): + # GH53325 + # in the future, the result will be a Series class. 
+ + with tm.assert_produces_warning(FutureWarning): + result = string_series.agg(lambda x: type(x)) + assert isinstance(result, Series) and len(result) == len(string_series) + + with tm.assert_produces_warning(FutureWarning): + result = string_series.agg(type) + assert isinstance(result, Series) and len(result) == len(string_series) + + +@pytest.mark.parametrize("op_name", ["agg", "apply"]) +def test_with_nested_series(datetime_series, op_name): + # GH 2316 + # .agg with a reducer and a transform, what to do + msg = "cannot aggregate" + warning = FutureWarning if op_name == "agg" else None + with tm.assert_produces_warning(warning, match=msg): + # GH52123 + result = getattr(datetime_series, op_name)( + lambda x: Series([x, x**2], index=["x", "x^2"]) + ) + expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2}) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"])) + tm.assert_frame_equal(result, expected) + + +def test_replicate_describe(string_series): + # this also tests a result set that is all scalars + expected = string_series.describe() + result = string_series.apply( + { + "count": "count", + "mean": "mean", + "std": "std", + "min": "min", + "25%": lambda x: x.quantile(0.25), + "50%": "median", + "75%": lambda x: x.quantile(0.75), + "max": "max", + }, + ) + tm.assert_series_equal(result, expected) + + +def test_reduce(string_series): + # reductions with named functions + result = string_series.agg(["sum", "mean"]) + expected = Series( + [string_series.sum(), string_series.mean()], + ["sum", "mean"], + name=string_series.name, + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "how, kwds", + [("agg", {}), ("apply", {"by_row": "compat"}), ("apply", {"by_row": False})], +) +def test_non_callable_aggregates(how, kwds): + # test agg using non-callable series attributes + # GH 39116 - expand to apply + s = Series([1, 2, None]) + + # Calling agg w/ just a string arg same as calling s.arg + result = getattr(s, how)("size", **kwds) + expected = s.size + assert result == expected + + # test when mixed w/ callable reducers + result = getattr(s, how)(["size", "count", "mean"], **kwds) + expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5}) + tm.assert_series_equal(result, expected) + + result = getattr(s, how)({"size": "size", "count": "count", "mean": "mean"}, **kwds) + tm.assert_series_equal(result, expected) + + +def test_series_apply_no_suffix_index(by_row): + # GH36189 + s = Series([4] * 3) + result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], by_row=by_row) + expected = Series([12, 12, 12], index=["sum", "", ""]) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dti,exp", + [ + ( + Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])), + DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"), + ), + ( + Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ), + DataFrame(np.repeat([[1, 2]], 10, axis=0), dtype="int64"), + ), + ], +) +@pytest.mark.parametrize("aware", [True, False]) +def test_apply_series_on_date_time_index_aware_series(dti, exp, aware): + # GH 25959 + # Calling apply on a localized time series should not cause an error + if aware: + index = dti.tz_localize("UTC").index + else: + index = dti.index + result = Series(index).apply(lambda x: Series([1, 2])) + tm.assert_frame_equal(result, exp) + + 
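A compact sketch of the by_row switch that the Series.apply tests here parametrize over (values illustrative): with by_row="compat" a plain callable is applied element by element, much like Series.map, while by_row=False hands the entire Series to the callable exactly once.

import pandas as pd

ser = pd.Series([1, 2, 3])

# by_row="compat": the lambda sees each scalar element in turn
print(ser.apply(lambda x: x * 2, by_row="compat"))  # Series([2, 4, 6])

# by_row=False: the lambda sees the whole Series once and may reduce it
print(ser.apply(lambda s: s.sum(), by_row=False))   # 6 (a scalar, not a Series)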
+@pytest.mark.parametrize( + "by_row, expected", [("compat", Series(np.ones(10), dtype="int64")), (False, 1)] +) +def test_apply_scalar_on_date_time_index_aware_series(by_row, expected): + # GH 25959 + # Calling apply on a localized time series should not cause an error + series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10, tz="UTC"), + ) + result = Series(series.index).apply(lambda x: 1, by_row=by_row) + tm.assert_equal(result, expected) + + +def test_apply_to_timedelta(by_row): + list_of_valid_strings = ["00:00:01", "00:00:02"] + a = pd.to_timedelta(list_of_valid_strings) + b = Series(list_of_valid_strings).apply(pd.to_timedelta, by_row=by_row) + tm.assert_series_equal(Series(a), b) + + list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT] + + a = pd.to_timedelta(list_of_strings) + ser = Series(list_of_strings) + b = ser.apply(pd.to_timedelta, by_row=by_row) + tm.assert_series_equal(Series(a), b) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sum], ["sum"]), + ([np.sum, np.mean], ["sum", "mean"]), + (np.array([np.sum]), ["sum"]), + (np.array([np.sum, np.mean]), ["sum", "mean"]), + ], +) +@pytest.mark.parametrize( + "how, kwargs", + [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]], +) +def test_apply_listlike_reducer(string_series, ops, names, how, kwargs): + # GH 39140 + expected = Series({name: op(string_series) for name, op in zip(names, ops)}) + expected.name = "series" + warn = FutureWarning if how == "agg" else None + msg = f"using Series.[{'|'.join(names)}]" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(string_series, how)(ops, **kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {"A": np.sum}, + {"A": np.sum, "B": np.mean}, + Series({"A": np.sum}), + Series({"A": np.sum, "B": np.mean}), + ], +) +@pytest.mark.parametrize( + "how, kwargs", + [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]], +) +def test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row): + # GH 39140 + expected = Series({name: op(string_series) for name, op in ops.items()}) + expected.name = string_series.name + warn = FutureWarning if how == "agg" else None + msg = "using Series.[sum|mean]" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(string_series, how)(ops, **kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_apply_listlike_transformer(string_series, ops, names, by_row): + # GH 39140 + with np.errstate(all="ignore"): + expected = concat([op(string_series) for op in ops], axis=1) + expected.columns = names + result = string_series.apply(ops, by_row=by_row) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, expected", + [ + ([lambda x: x], DataFrame({"": [1, 2, 3]})), + ([lambda x: x.sum()], Series([6], index=[""])), + ], +) +def test_apply_listlike_lambda(ops, expected, by_row): + # GH53400 + ser = Series([1, 2, 3]) + result = ser.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {"A": np.sqrt}, + {"A": np.sqrt, "B": np.exp}, + Series({"A": np.sqrt}), + Series({"A": np.sqrt, "B": np.exp}), + ], +) +def test_apply_dictlike_transformer(string_series, ops, by_row): + # GH 39140 + with 
np.errstate(all="ignore"): + expected = concat({name: op(string_series) for name, op in ops.items()}) + expected.name = string_series.name + result = string_series.apply(ops, by_row=by_row) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, expected", + [ + ( + {"a": lambda x: x}, + Series([1, 2, 3], index=MultiIndex.from_arrays([["a"] * 3, range(3)])), + ), + ({"a": lambda x: x.sum()}, Series([6], index=["a"])), + ], +) +def test_apply_dictlike_lambda(ops, by_row, expected): + # GH53400 + ser = Series([1, 2, 3]) + result = ser.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +def test_apply_retains_column_name(by_row): + # GH 16380 + df = DataFrame({"x": range(3)}, Index(range(3), name="x")) + result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y"))) + expected = DataFrame( + [[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]], + columns=Index(range(3), name="y"), + index=Index(range(3), name="x"), + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_type(): + # GH 46719 + s = Series([3, "string", float], index=["a", "b", "c"]) + result = s.apply(type) + expected = Series([int, str, type], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + +def test_series_apply_unpack_nested_data(): + # GH#55189 + ser = Series([[1, 2, 3], [4, 5, 6, 7]]) + result = ser.apply(lambda x: Series(x)) + expected = DataFrame({0: [1.0, 4.0], 1: [2.0, 5.0], 2: [3.0, 6.0], 3: [np.nan, 7]}) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfa054f91c9b67261d715cd7812a53d1b2d4b2f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_apply_relabeling.py @@ -0,0 +1,39 @@ +import pandas as pd +import pandas._testing as tm + + +def test_relabel_no_duplicated_method(): + # this is to test there is no duplicated method used in agg + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum") + expected = df["A"].agg({"foo": "sum"}) + tm.assert_series_equal(result, expected) + + result = df["B"].agg(foo="min", bar="max") + expected = df["B"].agg({"foo": "min", "bar": "max"}) + tm.assert_series_equal(result, expected) + + msg = "using Series.[sum|min|max]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df["B"].agg(foo=sum, bar=min, cat="max") + msg = "using Series.[sum|min|max]" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"}) + tm.assert_series_equal(result, expected) + + +def test_relabel_duplicated_method(): + # this is to test with nested renaming, duplicated method can be used + # if they are assigned with different new names + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum", bar="sum") + expected = pd.Series([6, 6], index=["foo", "bar"], name="A") + tm.assert_series_equal(result, expected) + + msg = "using Series.min" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df["B"].agg(foo=min, bar="min") + expected = pd.Series([1, 1], index=["foo", "bar"], name="B") + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..82592c4711ece5a7f4b6d421d743e1adbd78c345 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_series_transform.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "args, kwargs, increment", + [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)], +) +def test_agg_args(args, kwargs, increment): + # GH 43357 + def f(x, a=0, b=0, c=0): + return x + a + 10 * b + 100 * c + + s = Series([1, 2]) + result = s.transform(f, 0, *args, **kwargs) + expected = s + increment + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_transform_listlike(string_series, ops, names): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([op(string_series) for op in ops], axis=1) + expected.columns = names + result = string_series.transform(ops) + tm.assert_frame_equal(result, expected) + + +def test_transform_listlike_func_with_args(): + # GH 50624 + + s = Series([1, 2, 3]) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = r"foo1\(\) got an unexpected keyword argument 'b'" + with pytest.raises(TypeError, match=msg): + s.transform([foo1, foo2], 0, 3, b=3, c=4) + + result = s.transform([foo1, foo2], 0, 3, c=4) + expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("box", [dict, Series]) +def test_transform_dictlike(string_series, box): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1) + expected.columns = ["foo", "bar"] + result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs})) + tm.assert_frame_equal(result, expected) + + +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = Series([1, 4]) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_str.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_str.py new file mode 100644 index 0000000000000000000000000000000000000000..17e8322dc40e1ef0e65ed6d63a6e4af3a373e29b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/test_str.py @@ -0,0 +1,326 @@ +from itertools import chain +import operator + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_number + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.apply.common import ( + frame_transform_kernels, + series_transform_kernels, +) + + +@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"]) +@pytest.mark.parametrize( + "args,kwds", + [ + pytest.param([], {}, id="no_args_or_kwds"), + pytest.param([1], {}, id="axis_from_args"), + pytest.param([], {"axis": 1}, id="axis_from_kwds"), + 
pytest.param([], {"numeric_only": True}, id="optional_kwds"), + pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"), + ], +) +@pytest.mark.parametrize("how", ["agg", "apply"]) +def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how): + if len(args) > 1 and how == "agg": + request.applymarker( + pytest.mark.xfail( + raises=TypeError, + reason="agg/apply signature mismatch - agg passes 2nd " + "argument to func", + ) + ) + result = getattr(float_frame, how)(func, *args, **kwds) + expected = getattr(float_frame, func)(*args, **kwds) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("arg", ["sum", "mean", "min", "max", "std"]) +def test_with_string_args(datetime_series, arg): + result = datetime_series.apply(arg) + expected = getattr(datetime_series, arg)() + assert result == expected + + +@pytest.mark.parametrize("op", ["mean", "median", "std", "var"]) +@pytest.mark.parametrize("how", ["agg", "apply"]) +def test_apply_np_reducer(op, how): + # GH 39116 + float_frame = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = getattr(float_frame, how)(op) + # pandas ddof defaults to 1, numpy to 0 + kwargs = {"ddof": 1} if op in ("std", "var") else {} + expected = Series( + getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"] +) +@pytest.mark.parametrize("how", ["transform", "apply"]) +def test_apply_np_transformer(float_frame, op, how): + # GH 39116 + + # float_frame will _usually_ have negative values, which will + # trigger the warning here, but let's put one in just to be sure + float_frame.iloc[0, 0] = -1.0 + warn = None + if op in ["log", "sqrt"]: + warn = RuntimeWarning + + with tm.assert_produces_warning(warn, check_stacklevel=False): + # float_frame fixture is defined in conftest.py, so we don't check the + # stacklevel as otherwise the test would fail. 
+ result = getattr(float_frame, how)(op) + expected = getattr(np, op)(float_frame) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series(dtype=np.float64), + [ + ("sum", 0), + ("max", np.nan), + ("min", np.nan), + ("all", True), + ("any", False), + ("mean", np.nan), + ("prod", 1), + ("std", np.nan), + ("var", np.nan), + ("median", np.nan), + ], + ), + tm.get_cython_table_params( + Series([np.nan, 1, 2, 3]), + [ + ("sum", 6), + ("max", 3), + ("min", 1), + ("all", True), + ("any", True), + ("mean", 2), + ("prod", 6), + ("std", 1), + ("var", 1), + ("median", 2), + ], + ), + tm.get_cython_table_params( + Series("a b c".split()), + [ + ("sum", "abc"), + ("max", "c"), + ("min", "a"), + ("all", True), + ("any", True), + ], + ), + ), +) +def test_agg_cython_table_series(series, func, expected): + # GH21224 + # test reducing functions in + # pandas.core.base.SelectionMixin._cython_table + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + result = series.agg(func) + if is_number(expected): + assert np.isclose(result, expected, equal_nan=True) + else: + assert result == expected + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series(dtype=np.float64), + [ + ("cumprod", Series([], dtype=np.float64)), + ("cumsum", Series([], dtype=np.float64)), + ], + ), + tm.get_cython_table_params( + Series([np.nan, 1, 2, 3]), + [ + ("cumprod", Series([np.nan, 1, 2, 6])), + ("cumsum", Series([np.nan, 1, 3, 6])), + ], + ), + tm.get_cython_table_params( + Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))] + ), + ), +) +def test_agg_cython_table_transform_series(series, func, expected): + # GH21224 + # test transforming functions in + # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum) + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + result = series.agg(func) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "df, func, expected", + chain( + tm.get_cython_table_params( + DataFrame(), + [ + ("sum", Series(dtype="float64")), + ("max", Series(dtype="float64")), + ("min", Series(dtype="float64")), + ("all", Series(dtype=bool)), + ("any", Series(dtype=bool)), + ("mean", Series(dtype="float64")), + ("prod", Series(dtype="float64")), + ("std", Series(dtype="float64")), + ("var", Series(dtype="float64")), + ("median", Series(dtype="float64")), + ], + ), + tm.get_cython_table_params( + DataFrame([[np.nan, 1], [1, 2]]), + [ + ("sum", Series([1.0, 3])), + ("max", Series([1.0, 2])), + ("min", Series([1.0, 1])), + ("all", Series([True, True])), + ("any", Series([True, True])), + ("mean", Series([1, 1.5])), + ("prod", Series([1.0, 2])), + ("std", Series([np.nan, 0.707107])), + ("var", Series([np.nan, 0.5])), + ("median", Series([1, 1.5])), + ], + ), + ), +) +def test_agg_cython_table_frame(df, func, expected, axis): + # GH 21224 + # test reducing functions in + # pandas.core.base.SelectionMixin._cython_table + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"): + # GH#53425 + result = df.agg(func, axis=axis) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "df, func, expected", + chain( + tm.get_cython_table_params( + DataFrame(), [("cumprod", 
DataFrame()), ("cumsum", DataFrame())] + ), + tm.get_cython_table_params( + DataFrame([[np.nan, 1], [1, 2]]), + [ + ("cumprod", DataFrame([[np.nan, 1], [1, 2]])), + ("cumsum", DataFrame([[np.nan, 1], [1, 3]])), + ], + ), + ), +) +def test_agg_cython_table_transform_frame(df, func, expected, axis): + # GH 21224 + # test transforming functions in + # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum) + if axis in ("columns", 1): + # operating blockwise doesn't let us preserve dtypes + expected = expected.astype("float64") + + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"): + # GH#53425 + result = df.agg(func, axis=axis) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("op", series_transform_kernels) +def test_transform_groupby_kernel_series(request, string_series, op): + # GH 35964 + if op == "ngroup": + request.applymarker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + args = [0.0] if op == "fillna" else [] + ones = np.ones(string_series.shape[0]) + + warn = FutureWarning if op == "fillna" else None + msg = "SeriesGroupBy.fillna is deprecated" + with tm.assert_produces_warning(warn, match=msg): + expected = string_series.groupby(ones).transform(op, *args) + result = string_series.transform(op, 0, *args) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("op", frame_transform_kernels) +def test_transform_groupby_kernel_frame(request, axis, float_frame, op): + if op == "ngroup": + request.applymarker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + # GH 35964 + + args = [0.0] if op == "fillna" else [] + if axis in (0, "index"): + ones = np.ones(float_frame.shape[0]) + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + else: + ones = np.ones(float_frame.shape[1]) + msg = "DataFrame.groupby with axis=1 is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = float_frame.groupby(ones, axis=axis) + + warn = FutureWarning if op == "fillna" else None + op_msg = "DataFrameGroupBy.fillna is deprecated" + with tm.assert_produces_warning(warn, match=op_msg): + expected = gb.transform(op, *args) + + result = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result, expected) + + # same thing, but ensuring we have multiple blocks + assert "E" not in float_frame.columns + float_frame["E"] = float_frame["A"].copy() + assert len(float_frame._mgr.arrays) > 1 + + if axis in (0, "index"): + ones = np.ones(float_frame.shape[0]) + else: + ones = np.ones(float_frame.shape[1]) + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = float_frame.groupby(ones, axis=axis) + warn = FutureWarning if op == "fillna" else None + op_msg = "DataFrameGroupBy.fillna is deprecated" + with tm.assert_produces_warning(warn, match=op_msg): + expected2 = gb2.transform(op, *args) + result2 = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result2, expected2) + + +@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) +def test_transform_method_name(method): + # GH 19760 + df = DataFrame({"A": [-1, 2]}) + result = df.transform(method) + expected = operator.methodcaller(method)(df) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/__init__.py new file mode 100644 
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7aa9e7b2c46362aa9b6a9ebfc4f663cfd61058 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py @@ -0,0 +1,209 @@ +from copy import deepcopy +from operator import methodcaller + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrame: + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name(self, func): + df = DataFrame([[1, 2], [3, 4]]) + + result = methodcaller(func, "foo")(df) + assert df.index.name is None + assert result.index.name == "foo" + + result = methodcaller(func, "cols", axis=1)(df) + assert df.columns.name is None + assert result.columns.name == "cols" + + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name_mi(self, func): + df = DataFrame( + np.empty((3, 3)), + index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]), + columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]), + ) + + level_names = ["L1", "L2"] + + result = methodcaller(func, level_names)(df) + assert result.index.names == level_names + assert result.columns.names == [None, None] + + result = methodcaller(func, level_names, axis=1)(df) + assert result.columns.names == ["L1", "L2"] + assert result.index.names == [None, None] + + def test_nonzero_single_element(self): + # allow single item via bool method + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + df = DataFrame([[True]]) + df1 = DataFrame([[False]]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert df.bool() + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert not df1.bool() + + df = DataFrame([[False, False]]) + msg_err = "The truth value of a DataFrame is ambiguous" + with pytest.raises(ValueError, match=msg_err): + bool(df) + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + df.bool() + + def test_metadata_propagation_indiv_groupby(self): + # groupby + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + result = df.groupby("A").sum() + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv_resample(self): + # resample + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 2)), + index=date_range("20130101", periods=1000, freq="s"), + ) + result = df.resample("1min") + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv(self, monkeypatch): + # merging with override + # GH 6923 + + def finalize(self, other, method=None, **kwargs): + for name in
self._metadata: + if method == "merge": + left, right = other.left, other.right + value = getattr(left, name, "") + "|" + getattr(right, name, "") + object.__setattr__(self, name, value) + elif method == "concat": + value = "+".join( + [getattr(o, name) for o in other.objs if getattr(o, name, None)] + ) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, "")) + + return self + + with monkeypatch.context() as m: + m.setattr(DataFrame, "_metadata", ["filename"]) + m.setattr(DataFrame, "__finalize__", finalize) + + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"] + ) + df2 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"] + ) + DataFrame._metadata = ["filename"] + df1.filename = "fname1.csv" + df2.filename = "fname2.csv" + + result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") + assert result.filename == "fname1.csv|fname2.csv" + + # concat + # GH#6927 + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab") + ) + df1.filename = "foo" + + result = pd.concat([df1, df1]) + assert result.filename == "foo+foo" + + def test_set_attribute(self): + # Test for consistent setattr behavior when an attribute and a column + # have the same name (Issue #8994) + df = DataFrame({"x": [1, 2, 3]}) + + df.y = 2 + df["y"] = [2, 4, 6] + df.y = 5 + + assert df.y == 5 + tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y")) + + def test_deepcopy_empty(self): + # This test covers empty frame copying with non-empty column sets + # as reported in issue GH15370 + empty_frame = DataFrame(data=[], index=[], columns=["A"]) + empty_frame_copy = deepcopy(empty_frame) + + tm.assert_frame_equal(empty_frame_copy, empty_frame) + + +# formerly in Generic but only test DataFrame +class TestDataFrame2: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().drop("a", axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().fillna(value=0, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().replace(to_replace=1, value=7, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().interpolate(inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy()._where(cond=df.a > 2, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().mask(cond=df.a > 2, inplace=value) + + def test_unexpected_keyword(self): + # GH8597 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"] + ) + ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) + ts = df["joe"].copy() + ts[2] = np.nan + + msg = "unexpected keyword" + with pytest.raises(TypeError, match=msg): + df.drop("joe", axis=1, in_place=True) + + with pytest.raises(TypeError, match=msg): + df.reindex([1, 0], inplace=True) + + with pytest.raises(TypeError, match=msg): + ca.fillna(0, inplace=True) + + with pytest.raises(TypeError, match=msg): + ts.fillna(0, in_place=True) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py new file mode 100644 
index 0000000000000000000000000000000000000000..6564e381af0ea9b821e44f780ce209936f9524dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py @@ -0,0 +1,504 @@ +from copy import ( + copy, + deepcopy, +) + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_scalar + +from pandas import ( + DataFrame, + Index, + Series, + date_range, +) +import pandas._testing as tm + +# ---------------------------------------------------------------------- +# Generic types test cases + + +def construct(box, shape, value=None, dtype=None, **kwargs): + """ + Construct an object of the given shape. + If value is specified, use it if it is a scalar; + if value is an array, repeat it as needed. + """ + if isinstance(shape, int): + shape = tuple([shape] * box._AXIS_LEN) + if value is not None: + if is_scalar(value): + if value == "empty": + arr = None + dtype = np.float64 + + # remove the info axis + kwargs.pop(box._info_axis_name, None) + else: + arr = np.empty(shape, dtype=dtype) + arr.fill(value) + else: + fshape = np.prod(shape) + arr = value.ravel() + new_shape = fshape / arr.shape[0] + if fshape % arr.shape[0] != 0: + raise Exception("invalid value passed in construct") + + arr = np.repeat(arr, new_shape).reshape(shape) + else: + arr = np.random.default_rng(2).standard_normal(shape) + return box(arr, dtype=dtype, **kwargs) + + +class TestGeneric: + @pytest.mark.parametrize( + "func", + [ + str.lower, + {x: x.lower() for x in list("ABCD")}, + Series({x: x.lower() for x in list("ABCD")}), + ], + ) + def test_rename(self, frame_or_series, func): + # single axis + idx = list("ABCD") + + for axis in frame_or_series._AXIS_ORDERS: + kwargs = {axis: idx} + obj = construct(frame_or_series, 4, **kwargs) + + # rename a single axis + result = obj.rename(**{axis: func}) + expected = obj.copy() + setattr(expected, axis, list("abcd")) + tm.assert_equal(result, expected) + + def test_get_numeric_data(self, frame_or_series): + n = 4 + kwargs = { + frame_or_series._get_axis_name(i): list(range(n)) + for i in range(frame_or_series._AXIS_LEN) + } + + # get the numeric data + o = construct(frame_or_series, n, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + # non-inclusion + result = o._get_bool_data() + expected = construct(frame_or_series, n, value="empty", **kwargs) + if isinstance(o, DataFrame): + # preserve columns dtype + expected.columns = o.columns[:0] + # https://github.com/pandas-dev/pandas/issues/50862 + tm.assert_equal(result.reset_index(drop=True), expected) + + # get the bool data + arr = np.array([True, True, False, True]) + o = construct(frame_or_series, n, value=arr, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + def test_nonzero(self, frame_or_series): + # GH 4633 + # look at the boolean/nonzero behavior for objects + obj = construct(frame_or_series, shape=4) + msg = f"The truth value of a {frame_or_series.__name__} is ambiguous" + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=1) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=np.nan) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with
pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # empty + obj = construct(frame_or_series, shape=0) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # invalid behaviors + + obj1 = construct(frame_or_series, shape=4, value=1) + obj2 = construct(frame_or_series, shape=4, value=1) + + with pytest.raises(ValueError, match=msg): + if obj1: + pass + + with pytest.raises(ValueError, match=msg): + obj1 and obj2 + with pytest.raises(ValueError, match=msg): + obj1 or obj2 + with pytest.raises(ValueError, match=msg): + not obj1 + + def test_frame_or_series_compound_dtypes(self, frame_or_series): + # see gh-5191 + # Compound dtypes should raise NotImplementedError. + + def f(dtype): + return construct(frame_or_series, shape=3, value=1, dtype=dtype) + + msg = ( + "compound dtypes are not implemented " + f"in the {frame_or_series.__name__} constructor" + ) + + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) + + # these work (though results may be unexpected) + f("int64") + f("float64") + f("M8[ns]") + + def test_metadata_propagation(self, frame_or_series): + # check that the metadata matches up on the resulting ops + + o = construct(frame_or_series, shape=3) + o.name = "foo" + o2 = construct(frame_or_series, shape=3) + o2.name = "bar" + + # ---------- + # preserving + # ---------- + + # simple ops with scalars + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(1) + tm.assert_metadata_equivalent(o, result) + + # ops with like + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + v1 = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, v1) + tm.assert_metadata_equivalent(o, v1 & v1) + tm.assert_metadata_equivalent(o, v1 | v1) + + # combine_first + result = o.combine_first(o2) + tm.assert_metadata_equivalent(o, result) + + # --------------------------- + # non-preserving (by default) + # --------------------------- + + # add non-like + result = o + o2 + tm.assert_metadata_equivalent(result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + # this is a name matching op + v1 = getattr(o, op)(o) + v2 = getattr(o, op)(o2) + tm.assert_metadata_equivalent(v2) + tm.assert_metadata_equivalent(v1 & v2) + tm.assert_metadata_equivalent(v1 | v2) + + def test_size_compat(self, frame_or_series): + # GH8846 + # size property should be defined + + o = construct(frame_or_series, shape=10) + assert o.size == np.prod(o.shape) + assert o.size == 10 ** len(o.axes) + + def test_split_compat(self, frame_or_series): + # xref GH8846 + o = construct(frame_or_series, shape=10) + with tm.assert_produces_warning( + FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False + ): + assert len(np.array_split(o, 5)) == 5 + assert len(np.array_split(o, 2)) == 2 + + # See gh-12301 + def test_stat_unexpected_keyword(self, frame_or_series): + obj = construct(frame_or_series, 5) + starwars = "Star Wars" + errmsg = "unexpected keyword" + + with pytest.raises(TypeError, match=errmsg): + obj.max(epic=starwars) # stat_function + with pytest.raises(TypeError, match=errmsg): + obj.var(epic=starwars) # stat_function_ddof + with pytest.raises(TypeError, match=errmsg): + obj.sum(epic=starwars) # cum_function + with pytest.raises(TypeError, match=errmsg): + obj.any(epic=starwars) # logical_function + 
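(Editorial aside: a minimal sketch of the keyword validation that test_stat_unexpected_keyword above and test_stat_non_defaults_args below pin down; illustrative only, not part of the diff.)

import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
s.max()  # 3 -- the ordinary call works
# s.max(epic="Star Wars")   # TypeError: unexpected keyword argument 'epic'
# s.max(out=np.array([0]))  # ValueError: the 'out' parameter is not supported
# ('out' is accepted in the signature only for numpy compatibility and must
# be left unset)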
+ @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"]) + def test_api_compat(self, func, frame_or_series): + # GH 12021 + # compat for __name__, __qualname__ + + obj = construct(frame_or_series, 5) + f = getattr(obj, func) + assert f.__name__ == func + assert f.__qualname__.endswith(func) + + def test_stat_non_defaults_args(self, frame_or_series): + obj = construct(frame_or_series, 5) + out = np.array([0]) + errmsg = "the 'out' parameter is not supported" + + with pytest.raises(ValueError, match=errmsg): + obj.max(out=out) # stat_function + with pytest.raises(ValueError, match=errmsg): + obj.var(out=out) # stat_function_ddof + with pytest.raises(ValueError, match=errmsg): + obj.sum(out=out) # cum_function + with pytest.raises(ValueError, match=errmsg): + obj.any(out=out) # logical_function + + def test_truncate_out_of_bounds(self, frame_or_series): + # GH11382 + + # small + shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + small = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(small.truncate(), small) + tm.assert_equal(small.truncate(before=0, after=3e3), small) + tm.assert_equal(small.truncate(before=-1, after=2e3), small) + + # big + shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + big = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(big.truncate(), big) + tm.assert_equal(big.truncate(before=0, after=3e6), big) + tm.assert_equal(big.truncate(before=-1, after=2e6), big) + + @pytest.mark.parametrize( + "func", + [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)], + ) + @pytest.mark.parametrize("shape", [0, 1, 2]) + def test_copy_and_deepcopy(self, frame_or_series, shape, func): + # GH 15444 + obj = construct(frame_or_series, shape) + obj_copy = func(obj) + assert obj_copy is not obj + tm.assert_equal(obj_copy, obj) + + def test_data_deprecated(self, frame_or_series): + obj = frame_or_series() + msg = "(Series|DataFrame)._data is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + mgr = obj._data + assert mgr is obj._mgr + + +class TestNDFrame: + # tests that don't fit elsewhere + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_squeeze_series_noop(self, ser): + # noop + tm.assert_series_equal(ser.squeeze(), ser) + + def test_squeeze_frame_noop(self): + # noop + df = DataFrame(np.eye(2)) + tm.assert_frame_equal(df.squeeze(), df) + + def test_squeeze_frame_reindex(self): + # squeezing + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) + tm.assert_series_equal(df.squeeze(), df["A"]) + + def test_squeeze_0_len_dim(self): + # don't fail with 0 length dimensions GH11229 & GH8999 + empty_series = Series([], name="five", dtype=np.float64) + empty_frame = DataFrame([empty_series]) + tm.assert_series_equal(empty_series, empty_series.squeeze()) + tm.assert_series_equal(empty_series, empty_frame.squeeze()) + + def test_squeeze_axis(self): + # axis argument + df = DataFrame( + np.random.default_rng(2).standard_normal((1, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=1, freq="B"), + ).iloc[:, :1] + assert df.shape == (1, 1) + tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0]) + 
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) + tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0]) + assert df.squeeze() == df.iloc[0, 0] + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis=2) + msg = "No axis named x for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis="x") + + def test_squeeze_axis_len_3(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=3, freq="B"), + ) + tm.assert_frame_equal(df.squeeze(axis=0), df) + + def test_numpy_squeeze(self): + s = Series(range(2), dtype=np.float64) + tm.assert_series_equal(np.squeeze(s), s) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) + tm.assert_series_equal(np.squeeze(df), df["A"]) + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_transpose_series(self, ser): + # calls implementation in pandas/core/base.py + tm.assert_series_equal(ser.transpose(), ser) + + def test_transpose_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + tm.assert_frame_equal(df.transpose().transpose(), df) + + def test_numpy_transpose(self, frame_or_series): + obj = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + obj = tm.get_obj(obj, frame_or_series) + + if frame_or_series is Series: + # 1D -> np.transpose is no-op + tm.assert_series_equal(np.transpose(obj), obj) + + # round-trip preserved + tm.assert_equal(np.transpose(np.transpose(obj)), obj) + + msg = "the 'axes' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_take_series(self, ser): + indices = [1, 5, -2, 6, 3, -1] + out = ser.take(indices) + expected = Series( + data=ser.values.take(indices), + index=ser.index.take(indices), + dtype=ser.dtype, + ) + tm.assert_series_equal(out, expected) + + def test_take_frame(self): + indices = [1, 5, -2, 6, 3, -1] + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + out = df.take(indices) + expected = DataFrame( + data=df.values.take(indices, axis=0), + index=df.index.take(indices), + columns=df.columns, + ) + tm.assert_frame_equal(out, expected) + + def test_take_invalid_kwargs(self, frame_or_series): + indices = [-3, 2, 0, 1] + + obj = DataFrame(range(5)) + obj = tm.get_obj(obj, frame_or_series) + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + obj.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, mode="clip") + + def 
test_axis_classmethods(self, frame_or_series): + box = frame_or_series + obj = box(dtype=object) + values = box._AXIS_TO_AXIS_NUMBER.keys() + for v in values: + assert obj._get_axis_number(v) == box._get_axis_number(v) + assert obj._get_axis_name(v) == box._get_axis_name(v) + assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v) + + def test_flags_identity(self, frame_or_series): + obj = Series([1, 2]) + if frame_or_series is DataFrame: + obj = obj.to_frame() + + assert obj.flags is obj.flags + obj2 = obj.copy() + assert obj2.flags is not obj.flags + + def test_bool_dep(self) -> None: + # GH-51749 + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + DataFrame({"col": [False]}).bool() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..97be46f716d7daa98c1c1ebab04e1e6abb3a55bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py @@ -0,0 +1,336 @@ +import pytest + +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd + + +# Fixtures +# ======== +@pytest.fixture +def df(): + """DataFrame with columns 'L1', 'L2', and 'L3'""" + return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]}) + + +@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]]) +def df_levels(request, df): + """DataFrame with columns or index levels 'L1', 'L2', and 'L3'""" + levels = request.param + + if levels: + df = df.set_index(levels) + + return df + + +@pytest.fixture +def df_ambig(df): + """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'""" + df = df.set_index(["L1", "L2"]) + + df["L1"] = df["L3"] + + return df + + +@pytest.fixture +def df_duplabels(df): + """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'""" + df = df.set_index(["L1"]) + df = pd.concat([df, df["L2"]], axis=1) + + return df + + +# Test is label/level reference +# ============================= +def get_labels_levels(df_levels): + expected_labels = list(df_levels.columns) + expected_levels = [name for name in df_levels.index.names if name is not None] + return expected_labels, expected_levels + + +def assert_label_reference(frame, labels, axis): + for label in labels: + assert frame._is_label_reference(label, axis=axis) + assert not frame._is_level_reference(label, axis=axis) + assert frame._is_label_or_level_reference(label, axis=axis) + + +def assert_level_reference(frame, levels, axis): + for level in levels: + assert frame._is_level_reference(level, axis=axis) + assert not frame._is_label_reference(level, axis=axis) + assert frame._is_label_or_level_reference(level, axis=axis) + + +# DataFrame +# --------- +def test_is_level_or_label_reference_df_simple(df_levels, axis): + axis = df_levels._get_axis_number(axis) + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_level_reference(df_levels, expected_levels, axis=axis) + assert_label_reference(df_levels, expected_labels, axis=axis) + + +def test_is_level_reference_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + + # Transpose frame if 
axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has both an on-axis level and off-axis label named L1 + # Therefore L1 should reference the label, not the level + assert_label_reference(df_ambig, ["L1"], axis=axis) + + # df has an on-axis level named L2 and it is not ambiguous + # Therefore L2 is a level reference + assert_level_reference(df_ambig, ["L2"], axis=axis) + + # df has a column named L3 and it is not a level reference + assert_label_reference(df_ambig, ["L3"], axis=axis) + + +# Series +# ------ +def test_is_level_reference_series_simple_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_reference(s, ["L1"], axis=0) + assert not s._is_level_reference("L2") + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_reference(s, ["L1", "L2"], axis=0) + assert not s._is_level_reference("L3") + + +def test_is_level_reference_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._is_level_reference("L1", axis=1) + + +# Test _check_label_or_level_ambiguity_df +# ======================================= + + +# DataFrame +# --------- +def test_check_label_or_level_ambiguity_df(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + msg = "'L1' is both a column level and an index label" + + else: + msg = "'L1' is both an index level and a column label" + # df_ambig has both an on-axis level and off-axis label named L1 + # Therefore, L1 is ambiguous. + with pytest.raises(ValueError, match=msg): + df_ambig._check_label_or_level_ambiguity("L1", axis=axis) + + # df_ambig has an on-axis level named L2, and it is not ambiguous.
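+ # (the check passes silently -- no ValueError -- when the reference is
+ # unambiguous)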
+ df_ambig._check_label_or_level_ambiguity("L2", axis=axis) + + # df_ambig has an off-axis label named L3, and it is not ambiguous + assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis) + + +# Series +# ------ +def test_check_label_or_level_ambiguity_series(df): + # A series has no columns and therefore references are never ambiguous + + # Make series with L1 as index + s = df.set_index("L1").L2 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + s._check_label_or_level_ambiguity("L3", axis=0) + + +def test_check_label_or_level_ambiguity_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._check_label_or_level_ambiguity("L1", axis=1) + + +# Test _get_label_or_level_values +# =============================== +def assert_label_values(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + if axis == 0: + expected = frame[label]._values + else: + expected = frame.loc[label]._values + + result = frame._get_label_or_level_values(label, axis=axis) + assert array_equivalent(expected, result) + + +def assert_level_values(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + if axis == 0: + expected = frame.index.get_level_values(level=level)._values + else: + expected = frame.columns.get_level_values(level=level)._values + + result = frame._get_label_or_level_values(level, axis=axis) + assert array_equivalent(expected, result) + + +# DataFrame +# --------- +def test_get_label_or_level_values_df_simple(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_label_values(df_levels, expected_labels, axis=axis) + assert_level_values(df_levels, expected_levels, axis=axis) + + +def test_get_label_or_level_values_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has an on-axis level named L2, and it is not ambiguous. + assert_level_values(df_ambig, ["L2"], axis=axis) + + # df has an off-axis label named L3, and it is not ambiguous. 
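+ # (assert_label_values resolves 'L3' to frame[label]._values, i.e. the column
+ # values, or frame.loc[label]._values on the transposed frame)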
+ assert_label_values(df_ambig, ["L3"], axis=axis) + + +def test_get_label_or_level_values_df_duplabels(df_duplabels, axis): + axis = df_duplabels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_duplabels = df_duplabels.T + + # df has unambiguous level 'L1' + assert_level_values(df_duplabels, ["L1"], axis=axis) + + # df has unique label 'L3' + assert_label_values(df_duplabels, ["L3"], axis=axis) + + # df has duplicate labels 'L2' + if axis == 0: + expected_msg = "The column label 'L2' is not unique" + else: + expected_msg = "The index label 'L2' is not unique" + + with pytest.raises(ValueError, match=expected_msg): + assert_label_values(df_duplabels, ["L2"], axis=axis) + + +# Series +# ------ +def test_get_label_or_level_values_series_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_values(s, ["L1"], axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_values(s, ["L1", "L2"], axis=0) + + +def test_get_label_or_level_values_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._get_label_or_level_values("L1", axis=1) + + +# Test _drop_labels_or_levels +# =========================== +def assert_labels_dropped(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + df_dropped = frame._drop_labels_or_levels(label, axis=axis) + + if axis == 0: + assert label in frame.columns + assert label not in df_dropped.columns + else: + assert label in frame.index + assert label not in df_dropped.index + + +def assert_levels_dropped(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + df_dropped = frame._drop_labels_or_levels(level, axis=axis) + + if axis == 0: + assert level in frame.index.names + assert level not in df_dropped.index.names + else: + assert level in frame.columns.names + assert level not in df_dropped.columns.names + + +# DataFrame +# --------- +def test_drop_labels_or_levels_df(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_labels_dropped(df_levels, expected_labels, axis=axis) + assert_levels_dropped(df_levels, expected_levels, axis=axis) + + with pytest.raises(ValueError, match="not valid labels or levels"): + df_levels._drop_labels_or_levels("L4", axis=axis) + + +# Series +# ------ +def test_drop_labels_or_levels_series(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_levels_dropped(s, ["L1"], axis=0) + + with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_levels_dropped(s, ["L1", "L2"], axis=0) + + with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py new file mode 100644 index 0000000000000000000000000000000000000000..d8401a8b2ae3f3b885374375b64910e166dbe525 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py @@ -0,0 +1,130 @@ +import numpy as np +import 
pytest + +from pandas import ( + Categorical, + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + +pytest.importorskip("xarray") + + +class TestDataFrameToXArray: + @pytest.fixture + def df(self): + return DataFrame( + { + "a": list("abcd"), + "b": list(range(1, 5)), + "c": np.arange(3, 7).astype("u1"), + "d": np.arange(4.0, 8.0, dtype="float64"), + "e": [True, False, True, False], + "f": Categorical(list("abcd")), + "g": date_range("20130101", periods=4), + "h": date_range("20130101", periods=4, tz="US/Eastern"), + } + ) + + def test_to_xarray_index_types(self, index_flat, df, using_infer_string): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + if len(index) == 0: + pytest.skip("Test doesn't make sense for empty index") + + from xarray import Dataset + + df.index = index[:4] + df.index.name = "foo" + df.columns.name = "bar" + result = df.to_xarray() + assert result.sizes["foo"] == 4 + assert len(result.coords) == 1 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, Dataset) + + # idempotency + # datetimes w/tz are preserved + # column names are lost + expected = df.copy() + expected["f"] = expected["f"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.columns.name = None + tm.assert_frame_equal(result.to_dataframe(), expected) + + def test_to_xarray_empty(self, df): + from xarray import Dataset + + df.index.name = "foo" + result = df[0:0].to_xarray() + assert result.sizes["foo"] == 0 + assert isinstance(result, Dataset) + + def test_to_xarray_with_multiindex(self, df, using_infer_string): + from xarray import Dataset + + # MultiIndex + df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"]) + result = df.to_xarray() + assert result.sizes["one"] == 1 + assert result.sizes["two"] == 4 + assert len(result.coords) == 2 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert isinstance(result, Dataset) + + result = result.to_dataframe() + expected = df.copy() + expected["f"] = expected["f"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.columns.name = None + tm.assert_frame_equal(result, expected) + + +class TestSeriesToXArray: + def test_to_xarray_index_types(self, index_flat): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + + from xarray import DataArray + + ser = Series(range(len(index)), index=index, dtype="int64") + ser.index.name = "foo" + result = ser.to_xarray() + repr(result) + assert len(result) == len(index) + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + # idempotency + tm.assert_series_equal(result.to_series(), ser) + + def test_to_xarray_empty(self): + from xarray import DataArray + + ser = Series([], dtype=object) + ser.index.name = "foo" + result = ser.to_xarray() + assert len(result) == 0 + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + def test_to_xarray_with_multiindex(self): + from xarray import DataArray + + mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"]) + ser = Series(range(6), dtype="int64", index=mi) + result = ser.to_xarray() + assert len(result) == 2 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert 
isinstance(result, DataArray) + res = result.to_series() + tm.assert_series_equal(res, ser) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_arithmetic.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..3a7c418b27de6ddf79c87a813d43f21369ecc367 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -0,0 +1,56 @@ +# Arithmetic tests specific to DatetimeIndex are generally about `freq` +# retention or inference.
Other arithmetic tests belong in +# tests/arithmetic/test_datetime64.py +import pytest + +from pandas import ( + Timedelta, + TimedeltaIndex, + Timestamp, + date_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexArithmetic: + def test_add_timedelta_preserves_freq(self): + # GH#37295 should hold for any DTI with freq=None or Tick freq + tz = "Canada/Eastern" + dti = date_range( + start=Timestamp("2019-03-26 00:00:00-0400", tz=tz), + end=Timestamp("2020-10-17 00:00:00-0400", tz=tz), + freq="D", + ) + result = dti + Timedelta(days=1) + assert result.freq == dti.freq + + def test_sub_datetime_preserves_freq(self, tz_naive_fixture): + # GH#48818 + dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture) + + res = dti - dti[0] + expected = timedelta_range("0 Days", "11 Days") + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq + + @pytest.mark.xfail( + reason="The inherited freq is incorrect bc dti.freq is incorrect " + "https://github.com/pandas-dev/pandas/pull/48818/files#r982793461" + ) + def test_sub_datetime_preserves_freq_across_dst(self): + # GH#48818 + ts = Timestamp("2016-03-11", tz="US/Pacific") + dti = date_range(ts, periods=4) + + res = dti - dti[0] + expected = TimedeltaIndex( + [ + Timedelta(days=0), + Timedelta(days=1), + Timedelta(days=2), + Timedelta(days=2, hours=23), + ] + ) + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_iter.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_iter.py new file mode 100644 index 0000000000000000000000000000000000000000..a006ed79f27baed75bedb95e6f24e948e429172e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_iter.py @@ -0,0 +1,76 @@ +import dateutil.tz +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + date_range, + to_datetime, +) +from pandas.core.arrays import datetimes + + +class TestDatetimeIndexIteration: + @pytest.mark.parametrize( + "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] + ) + def test_iteration_preserves_nanoseconds(self, tz): + # GH#19603 + index = DatetimeIndex( + ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz + ) + for i, ts in enumerate(index): + assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup + + def test_iter_readonly(self): + # GH#28055 ints_to_pydatetime with readonly array + arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) + arr.setflags(write=False) + dti = to_datetime(arr) + list(dti) + + def test_iteration_preserves_tz(self): + # see GH#8890 + index = date_range("2012-01-01", periods=3, freq="h", tz="US/Eastern") + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result == expected + + def test_iteration_preserves_tz2(self): + index = date_range( + "2012-01-01", periods=3, freq="h", tz=dateutil.tz.tzoffset(None, -28800) + ) + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + def test_iteration_preserves_tz3(self): + # GH#9100 + index = DatetimeIndex( + ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"] + ) + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: 
disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + @pytest.mark.parametrize("offset", [-5, -1, 0, 1]) + def test_iteration_over_chunksize(self, offset, monkeypatch): + # GH#21012 + chunksize = 5 + index = date_range( + "2000-01-01 00:00:00", periods=chunksize - offset, freq="min" + ) + num = 0 + with monkeypatch.context() as m: + m.setattr(datetimes, "_ITER_CHUNKSIZE", chunksize) + for stamp in index: + assert index[num] == stamp + num += 1 + assert num == len(index) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_join.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ac32939296c3f6f0fb0ecb501dde9d65ba989d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_join.py @@ -0,0 +1,149 @@ +from datetime import ( + datetime, + timezone, +) + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Timestamp, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + +from pandas.tseries.offsets import ( + BDay, + BMonthEnd, +) + + +class TestJoin: + def test_does_not_convert_mixed_integer(self): + df = DataFrame(np.ones((3, 2)), columns=date_range("2020-01-01", periods=2)) + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_numpy_array_equal(cols.values, joined.values) + + def test_join_self(self, join_type): + index = date_range("1/1/2000", periods=10) + joined = index.join(index, how=join_type) + assert index is joined + + def test_join_with_period_index(self, join_type): + df = DataFrame( + np.ones((10, 2)), + index=date_range("2020-01-01", periods=10), + columns=period_range("2020-01-01", periods=2), + ) + s = df.iloc[:5, 0] + + expected = df.columns.astype("O").join(s.index, how=join_type) + result = df.columns.join(s.index, how=join_type) + tm.assert_index_equal(expected, result) + + def test_join_object_index(self): + rng = date_range("1/1/2000", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.join(idx, how="outer") + assert isinstance(result[0], Timestamp) + + def test_join_utc_convert(self, join_type): + rng = date_range("1/1/2011", periods=100, freq="h", tz="utc") + + left = rng.tz_convert("US/Eastern") + right = rng.tz_convert("Europe/Berlin") + + result = left.join(left[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz + + result = left.join(right[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz is timezone.utc + + def test_datetimeindex_union_join_empty(self, sort): + dti = date_range(start="1/1/2001", end="2/1/2001", freq="D") + empty = Index([]) + + result = dti.union(empty, sort=sort) + expected = dti.astype("O") + tm.assert_index_equal(result, expected) + + result = dti.join(empty) + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, dti) + + def test_join_nonunique(self): + idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"]) + idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"]) + rs = idx1.join(idx2, how="outer") + assert rs.is_monotonic_increasing + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_outer_join(self, freq): + # should just behave as union + start, 
end = datetime(2009, 1, 1), datetime(2010, 1, 1) + rng = date_range(start=start, end=end, freq=freq) + + # overlapping + left = rng[:10] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # non-overlapping, gap in middle + left = rng[:5] + right = rng[10:] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + # non-overlapping, no gap + left = rng[:5] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # overlapping, but different offset + other = date_range(start, end, freq=BMonthEnd()) + + the_join = rng.join(other, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + def test_naive_aware_conflicts(self): + start, end = datetime(2009, 1, 1), datetime(2010, 1, 1) + naive = date_range(start, end, freq=BDay(), tz=None) + aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong") + + msg = "tz-naive.*tz-aware" + with pytest.raises(TypeError, match=msg): + naive.join(aware) + + with pytest.raises(TypeError, match=msg): + aware.join(naive) + + @pytest.mark.parametrize("tz", [None, "US/Pacific"]) + def test_join_preserves_freq(self, tz): + # GH#32157 + dti = date_range("2016-01-01", periods=10, tz=tz) + result = dti[:5].join(dti[5:], how="outer") + assert result.freq == dti.freq + tm.assert_index_equal(result, dti) + + result = dti[:5].join(dti[6:], how="outer") + assert result.freq is None + expected = dti.delete(5) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_ops.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bac9548b932c163dc7a33282796c1bb682187664 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_ops.py @@ -0,0 +1,56 @@ +from datetime import datetime + +import pytest + +from pandas import ( + DatetimeIndex, + Index, + bdate_range, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexOps: + def test_infer_freq(self, freq_sample): + # GH 11018 + idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) + result = DatetimeIndex(idx.asi8, freq="infer") + tm.assert_index_equal(idx, result) + assert result.freq == freq_sample + + +@pytest.mark.parametrize("freq", ["B", "C"]) +class TestBusinessDatetimeIndex: + @pytest.fixture + def rng(self, freq): + START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + return bdate_range(START, END, freq=freq) + + def test_comparison(self, rng): + d = rng[10] + + comp = rng > d + assert comp[11] + assert not comp[9] + + def test_copy(self, rng): + cp = rng.copy() + tm.assert_index_equal(cp, rng) + + def test_identical(self, rng): + t1 = rng.copy() + t2 = rng.copy() + assert t1.identical(t2) + + # name + t1 = t1.rename("foo") + assert t1.equals(t2) + assert not t1.identical(t2) + t2 = t2.rename("foo") + assert t1.identical(t2) + + # freq + t2v = Index(t2.values) + assert t1.equals(t2v) + assert not t1.identical(t2v) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_pickle.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..922b4a18119f4d457de501225611f8884689d434 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_pickle.py @@ -0,0 +1,45 @@ +import pytest + +from pandas import ( + NaT, + date_range, + to_datetime, +) +import pandas._testing as tm + + +class TestPickle: + def test_pickle(self): + # GH#4606 + idx = to_datetime(["2013-01-01", NaT, "2014-01-06"]) + idx_p = tm.round_trip_pickle(idx) + assert idx_p[0] == idx[0] + assert idx_p[1] is NaT + assert idx_p[2] == idx[2] + + def test_pickle_dont_infer_freq(self): + # GH#11002 + # don't infer freq + idx = date_range("1750-1-1", "2050-1-1", freq="7D") + idx_p = tm.round_trip_pickle(idx) + tm.assert_index_equal(idx, idx_p) + + def test_pickle_after_set_freq(self): + dti = date_range("20130101", periods=3, tz="US/Eastern", name="foo") + dti = dti._with_freq(None) + + res = tm.round_trip_pickle(dti) + tm.assert_index_equal(res, dti) + + def test_roundtrip_pickle_with_tz(self): + # GH#8367 + # round-trip of timezone + index = date_range("20130101", periods=3, tz="US/Eastern", name="foo") + unpickled = tm.round_trip_pickle(index) + tm.assert_index_equal(index, unpickled) + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_pickle_unpickle(self, freq): + rng = date_range("2009-01-01", "2010-01-01", freq=freq) + unpickled = tm.round_trip_pickle(rng) + assert unpickled.freq == freq diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..e93fc0e2a4e2e740e2dee27e332b68b060ba7aa7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -0,0 +1,329 @@ +""" +Tests for DatetimeIndex methods behaving like their Timestamp counterparts +""" + +import calendar +from datetime import ( + date, + datetime, + time, +) +import locale +import unicodedata + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones + +from pandas import ( + DatetimeIndex, + Index, + NaT, + Timestamp, + date_range, + offsets, +) +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestDatetimeIndexOps: + def test_dti_no_millisecond_field(self): + msg = "type object 'DatetimeIndex' has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + DatetimeIndex.millisecond + + msg = "'DatetimeIndex' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + DatetimeIndex([]).millisecond + + def test_dti_time(self): + rng = date_range("1/1/2000", freq="12min", periods=10) + result = Index(rng).time + expected = [t.time() for t in rng] + assert (result == expected).all() + + def test_dti_date(self): + rng = date_range("1/1/2000", freq="12h", periods=10) + result = Index(rng).date + expected = [t.date() for t in rng] + assert (result == expected).all() + + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_dti_date2(self, dtype): + # Regression test for GH#21230 + expected = np.array([date(2018, 6, 4), NaT]) + + index = DatetimeIndex(["2018-06-04 10:00:00", NaT], dtype=dtype) + result = index.date + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_dti_time2(self, dtype): + # Regression test for GH#21267 + expected = 
np.array([time(10, 20, 30), NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", NaT], dtype=dtype) + result = index.time + + tm.assert_numpy_array_equal(result, expected) + + def test_dti_timetz(self, tz_naive_fixture): + # GH#21358 + tz = timezones.maybe_get_tz(tz_naive_fixture) + + expected = np.array([time(10, 20, 30, tzinfo=tz), NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", NaT], tz=tz) + result = index.timetz + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "field", + [ + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "days_in_month", + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + ], + ) + def test_dti_timestamp_fields(self, field): + # extra fields from DatetimeIndex like quarter and week + idx = date_range("2020-01-01", periods=10) + expected = getattr(idx, field)[-1] + + result = getattr(Timestamp(idx[-1]), field) + assert result == expected + + def test_dti_nanosecond(self): + dti = DatetimeIndex(np.arange(10)) + expected = Index(np.arange(10, dtype=np.int32)) + + tm.assert_index_equal(dti.nanosecond, expected) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_hour_tzaware(self, prefix): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") + assert (rng.hour == 0).all() + + # a more unusual time zone, GH#1946 + dr = date_range( + "2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan" + ) + + expected = Index(np.arange(10, dtype=np.int32)) + tm.assert_index_equal(dr.hour, expected) + + # GH#12806 + # error: Unsupported operand types for + ("List[None]" and "List[str]") + @pytest.mark.parametrize( + "time_locale", [None] + tm.get_locales() # type: ignore[operator] + ) + def test_day_name_month_name(self, time_locale): + # Test Monday -> Sunday and January -> December, in that sequence + if time_locale is None: + # If the time_locale is None, day-name and month_name should + # return the english attributes + expected_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + expected_months = [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_days = calendar.day_name[:] + expected_months = calendar.month_name[1:] + + # GH#11128 + dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365) + english_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + for day, name, eng_name in zip(range(4, 11), expected_days, english_days): + name = name.capitalize() + assert dti.day_name(locale=time_locale)[day] == name + assert dti.day_name(locale=None)[day] == eng_name + ts = Timestamp(datetime(2016, 4, day)) + assert ts.day_name(locale=time_locale) == name + dti = dti.append(DatetimeIndex([NaT])) + assert np.isnan(dti.day_name(locale=time_locale)[-1]) + ts = Timestamp(NaT) + assert np.isnan(ts.day_name(locale=time_locale)) + + # GH#12805 + dti = date_range(freq="ME", start="2012", end="2013") + result = dti.month_name(locale=time_locale) + expected = Index([month.capitalize() for month in expected_months]) + + # work around different normalization schemes GH#22342 + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") + + tm.assert_index_equal(result, 
expected) + + for item, expected in zip(dti, expected_months): + result = item.month_name(locale=time_locale) + expected = expected.capitalize() + + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", expected) + + assert result == expected + dti = dti.append(DatetimeIndex([NaT])) + assert np.isnan(dti.month_name(locale=time_locale)[-1]) + + def test_dti_week(self): + # GH#6538: Check that DatetimeIndex and its Timestamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + expected = [52, 1, 1] + assert dates.isocalendar().week.tolist() == expected + assert [d.weekofyear for d in dates] == expected + + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_dti_fields(self, tz): + # GH#13303 + dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365, tz=tz) + assert dti.year[0] == 1998 + assert dti.month[0] == 1 + assert dti.day[0] == 1 + assert dti.hour[0] == 0 + assert dti.minute[0] == 0 + assert dti.second[0] == 0 + assert dti.microsecond[0] == 0 + assert dti.dayofweek[0] == 3 + + assert dti.dayofyear[0] == 1 + assert dti.dayofyear[120] == 121 + + assert dti.isocalendar().week.iloc[0] == 1 + assert dti.isocalendar().week.iloc[120] == 18 + + assert dti.quarter[0] == 1 + assert dti.quarter[120] == 2 + + assert dti.days_in_month[0] == 31 + assert dti.days_in_month[90] == 30 + + assert dti.is_month_start[0] + assert not dti.is_month_start[1] + assert dti.is_month_start[31] + assert dti.is_quarter_start[0] + assert dti.is_quarter_start[90] + assert dti.is_year_start[0] + assert not dti.is_year_start[364] + assert not dti.is_month_end[0] + assert dti.is_month_end[30] + assert not dti.is_month_end[31] + assert dti.is_month_end[364] + assert not dti.is_quarter_end[0] + assert not dti.is_quarter_end[30] + assert dti.is_quarter_end[89] + assert dti.is_quarter_end[364] + assert not dti.is_year_end[0] + assert dti.is_year_end[364] + + assert len(dti.year) == 365 + assert len(dti.month) == 365 + assert len(dti.day) == 365 + assert len(dti.hour) == 365 + assert len(dti.minute) == 365 + assert len(dti.second) == 365 + assert len(dti.microsecond) == 365 + assert len(dti.dayofweek) == 365 + assert len(dti.dayofyear) == 365 + assert len(dti.isocalendar()) == 365 + assert len(dti.quarter) == 365 + assert len(dti.is_month_start) == 365 + assert len(dti.is_month_end) == 365 + assert len(dti.is_quarter_start) == 365 + assert len(dti.is_quarter_end) == 365 + assert len(dti.is_year_start) == 365 + assert len(dti.is_year_end) == 365 + + dti.name = "name" + + # non-boolean accessors -> return Index + for accessor in DatetimeArray._field_ops: + res = getattr(dti, accessor) + assert len(res) == 365 + assert isinstance(res, Index) + assert res.name == "name" + + # boolean accessors -> return array + for accessor in DatetimeArray._bool_ops: + res = getattr(dti, accessor) + assert len(res) == 365 + assert isinstance(res, np.ndarray) + + # test boolean indexing + res = dti[dti.is_quarter_start] + exp = dti[[0, 90, 181, 273]] + tm.assert_index_equal(res, exp) + res = dti[dti.is_leap_year] + exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name").as_unit("ns") + tm.assert_index_equal(res, exp) + + def test_dti_is_year_quarter_start(self): + dti = date_range(freq="BQE-FEB", start=datetime(1998, 1, 1), periods=4) + + assert sum(dti.is_quarter_start) == 0 + assert sum(dti.is_quarter_end) == 4 + assert sum(dti.is_year_start) == 0 + assert 
sum(dti.is_year_end) == 1 + + def test_dti_is_month_start(self): + dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) + + assert dti.is_month_start[0] == 1 + + def test_dti_is_month_start_custom(self): + # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, + bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu") + dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) + msg = "Custom business days is not supported by is_month_start" + with pytest.raises(ValueError, match=msg): + dti.is_month_start diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a0b3db151b77ae6a725a9a5d22a93004a12019a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_astype.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25f4a7a01ba910ead8a50766014433fda4975a8d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_astype.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_indexing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de06dacb69ccf0e119b2fe3f7648c25dccf0fe85 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_join.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e61950a95080c2a6b7e52bb26259c6b6fb6cae7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_join.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_numeric.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_numeric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..623c25ef80305b9c60a06d68fb8e536ba0ec91bc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_numeric.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_setops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_setops.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..53ab3490c1fe77dd92726b7caf927a0958ee8c60 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/__pycache__/test_setops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_astype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..1c2df6008de5d85789b026e947ac27a8036a9be7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_astype.py @@ -0,0 +1,95 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + to_datetime, + to_timedelta, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype_float64_to_uint64(self): + # GH#45309 used to incorrectly return Index with int64 dtype + idx = Index([0.0, 5.0, 10.0, 15.0, 20.0], dtype=np.float64) + result = idx.astype("u8") + expected = Index([0, 5, 10, 15, 20], dtype=np.uint64) + tm.assert_index_equal(result, expected, exact=True) + + idx_with_negatives = idx - 10 + with pytest.raises(ValueError, match="losslessly"): + idx_with_negatives.astype(np.uint64) + + def test_astype_float64_to_object(self): + float_index = Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=np.float64) + result = float_index.astype(object) + assert result.equals(float_index) + assert float_index.equals(result) + assert isinstance(result, Index) and result.dtype == object + + def test_astype_float64_mixed_to_object(self): + # mixed int-float + idx = Index([1.5, 2, 3, 4, 5], dtype=np.float64) + idx.name = "foo" + result = idx.astype(object) + assert result.equals(idx) + assert idx.equals(result) + assert isinstance(result, Index) and result.dtype == object + + @pytest.mark.parametrize("dtype", ["int16", "int32", "int64"]) + def test_astype_float64_to_int_dtype(self, dtype): + # GH#12881 + # a float astype int + idx = Index([0, 1, 2], dtype=np.float64) + result = idx.astype(dtype) + expected = Index([0, 1, 2], dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + idx = Index([0, 1.1, 2], dtype=np.float64) + result = idx.astype(dtype) + expected = Index([0, 1, 2], dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + @pytest.mark.parametrize("dtype", ["float32", "float64"]) + def test_astype_float64_to_float_dtype(self, dtype): + # GH#12881 + # a float astype float + idx = Index([0, 1, 2], dtype=np.float64) + result = idx.astype(dtype) + assert isinstance(result, Index) and result.dtype == dtype + + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_astype_float_to_datetimelike(self, dtype): + # GH#49660 pre-2.0 Index.astype from floating to M8/m8/Period raised, + # inconsistent with Series.astype + idx = Index([0, 1.1, 2], dtype=np.float64) + + result = idx.astype(dtype) + if dtype[0] == "M": + expected = to_datetime(idx.values) + else: + expected = to_timedelta(idx.values) + tm.assert_index_equal(result, expected) + + # check that we match Series behavior + result = idx.to_series().set_axis(range(3)).astype(dtype) + expected = expected.to_series().set_axis(range(3)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [int, "int16", "int32", "int64"]) + @pytest.mark.parametrize("non_finite", [np.inf, np.nan]) + def test_cannot_cast_inf_to_int(self, non_finite, dtype): + # GH#13149 + idx = Index([1, 2, non_finite], dtype=np.float64) + + msg = r"Cannot convert non-finite values \(NA 
or inf\) to integer" + with pytest.raises(ValueError, match=msg): + idx.astype(dtype) + + def test_astype_from_object(self): + index = Index([1.0, np.nan, 0.2], dtype="object") + result = index.astype(float) + expected = Index([1.0, np.nan, 0.2], dtype=np.float64) + assert result.dtype == expected.dtype + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_indexing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..cd28d519313ed36228040361dfbb2a8dccf77be5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_indexing.py @@ -0,0 +1,611 @@ +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +from pandas import ( + NA, + Index, + RangeIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowExtensionArray, + FloatingArray, +) + + +@pytest.fixture +def index_large(): + # large values used in Index[uint64] tests where no compat needed with Int64/Float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + +class TestGetLoc: + def test_get_loc(self): + index = Index([0, 1, 2]) + assert index.get_loc(1) == 1 + + def test_get_loc_raises_bad_label(self): + index = Index([0, 1, 2]) + with pytest.raises(InvalidIndexError, match=r"\[1, 2\]"): + index.get_loc([1, 2]) + + def test_get_loc_float64(self): + idx = Index([0.0, 1.0, 2.0], dtype=np.float64) + + with pytest.raises(KeyError, match="^'foo'$"): + idx.get_loc("foo") + with pytest.raises(KeyError, match=r"^1\.5$"): + idx.get_loc(1.5) + with pytest.raises(KeyError, match="^True$"): + idx.get_loc(True) + with pytest.raises(KeyError, match="^False$"): + idx.get_loc(False) + + def test_get_loc_na(self): + idx = Index([np.nan, 1, 2], dtype=np.float64) + assert idx.get_loc(1) == 1 + assert idx.get_loc(np.nan) == 0 + + idx = Index([np.nan, 1, np.nan], dtype=np.float64) + assert idx.get_loc(1) == 1 + + # representable by slice [0:2:2] + msg = "'Cannot get left slice bound for non-unique label: nan'" + with pytest.raises(KeyError, match=msg): + idx.slice_locs(np.nan) + # not representable by slice + idx = Index([np.nan, 1, np.nan, np.nan], dtype=np.float64) + assert idx.get_loc(1) == 1 + msg = "'Cannot get left slice bound for non-unique label: nan" + with pytest.raises(KeyError, match=msg): + idx.slice_locs(np.nan) + + def test_get_loc_missing_nan(self): + # GH#8569 + idx = Index([1, 2], dtype=np.float64) + assert idx.get_loc(1) == 0 + with pytest.raises(KeyError, match=r"^3$"): + idx.get_loc(3) + with pytest.raises(KeyError, match="^nan$"): + idx.get_loc(np.nan) + with pytest.raises(InvalidIndexError, match=r"\[nan\]"): + # listlike/non-hashable raises TypeError + idx.get_loc([np.nan]) + + @pytest.mark.parametrize("vals", [[1], [1.0], [Timestamp("2019-12-31")], ["test"]]) + def test_get_loc_float_index_nan_with_method(self, vals): + # GH#39382 + idx = Index(vals) + with pytest.raises(KeyError, match="nan"): + idx.get_loc(np.nan) + + @pytest.mark.parametrize("dtype", ["f8", "i8", "u8"]) + def test_get_loc_numericindex_none_raises(self, dtype): + # case that goes through searchsorted and key is non-comparable to values + arr = np.arange(10**7, dtype=dtype) + idx = Index(arr) + with pytest.raises(KeyError, match="None"): + idx.get_loc(None) + + def test_get_loc_overflows(self): + # unique but 
non-monotonic goes through IndexEngine.mapping.get_item + idx = Index([0, 2, 1]) + + val = np.iinfo(np.int64).max + 1 + + with pytest.raises(KeyError, match=str(val)): + idx.get_loc(val) + with pytest.raises(KeyError, match=str(val)): + idx._engine.get_loc(val) + + +class TestGetIndexer: + def test_get_indexer(self): + index1 = Index([1, 2, 3, 4, 5]) + index2 = Index([2, 4, 6]) + + r1 = index1.get_indexer(index2) + e1 = np.array([1, 3, -1], dtype=np.intp) + tm.assert_almost_equal(r1, e1) + + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize( + "expected,method", + [ + (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"), + (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"), + (np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"), + (np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"), + ], + ) + def test_get_indexer_methods(self, reverse, expected, method): + index1 = Index([1, 2, 3, 4, 5]) + index2 = Index([2, 4, 6]) + + if reverse: + index1 = index1[::-1] + expected = expected[::-1] + + result = index2.get_indexer(index1, method=method) + tm.assert_almost_equal(result, expected) + + def test_get_indexer_invalid(self): + # GH10411 + index = Index(np.arange(10)) + + with pytest.raises(ValueError, match="tolerance argument"): + index.get_indexer([1, 0], tolerance=1) + + with pytest.raises(ValueError, match="limit argument"): + index.get_indexer([1, 0], limit=1) + + @pytest.mark.parametrize( + "method, tolerance, indexer, expected", + [ + ("pad", None, [0, 5, 9], [0, 5, 9]), + ("backfill", None, [0, 5, 9], [0, 5, 9]), + ("nearest", None, [0, 5, 9], [0, 5, 9]), + ("pad", 0, [0, 5, 9], [0, 5, 9]), + ("backfill", 0, [0, 5, 9], [0, 5, 9]), + ("nearest", 0, [0, 5, 9], [0, 5, 9]), + ("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]), + ("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]), + ("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]), + ("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]), + ("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]), + ("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]), + ("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]), + ("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]), + ("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]), + ], + ) + def test_get_indexer_nearest(self, method, tolerance, indexer, expected): + index = Index(np.arange(10)) + + actual = index.get_indexer(indexer, method=method, tolerance=tolerance) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + @pytest.mark.parametrize("listtype", [list, tuple, Series, np.array]) + @pytest.mark.parametrize( + "tolerance, expected", + list( + zip( + [[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]], + [[0, 2, -1], [0, -1, -1], [-1, 2, 9]], + ) + ), + ) + def test_get_indexer_nearest_listlike_tolerance( + self, tolerance, expected, listtype + ): + index = Index(np.arange(10)) + + actual = index.get_indexer( + [0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance) + ) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + def test_get_indexer_nearest_error(self): + index = Index(np.arange(10)) + with pytest.raises(ValueError, match="limit argument"): + index.get_indexer([1, 0], method="nearest", limit=1) + + with pytest.raises(ValueError, match="tolerance size must match"): + index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3]) + + @pytest.mark.parametrize( + "method,expected", + [("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])], + ) + def test_get_indexer_nearest_decreasing(self, method, expected): + index = Index(np.arange(10))[::-1] + + actual = 
index.get_indexer([0, 5, 9], method=method) + tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp)) + + actual = index.get_indexer([0.2, 1.8, 8.5], method=method) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + @pytest.mark.parametrize("idx_dtype", ["int64", "float64", "uint64", "range"]) + @pytest.mark.parametrize("method", ["get_indexer", "get_indexer_non_unique"]) + def test_get_indexer_numeric_index_boolean_target(self, method, idx_dtype): + # GH 16877 + + if idx_dtype == "range": + numeric_index = RangeIndex(4) + else: + numeric_index = Index(np.arange(4, dtype=idx_dtype)) + + other = Index([True, False, True]) + + result = getattr(numeric_index, method)(other) + expected = np.array([-1, -1, -1], dtype=np.intp) + if method == "get_indexer": + tm.assert_numpy_array_equal(result, expected) + else: + missing = np.arange(3, dtype=np.intp) + tm.assert_numpy_array_equal(result[0], expected) + tm.assert_numpy_array_equal(result[1], missing) + + @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"]) + def test_get_indexer_with_method_numeric_vs_bool(self, method): + left = Index([1, 2, 3]) + right = Index([True, False]) + + with pytest.raises(TypeError, match="Cannot compare"): + left.get_indexer(right, method=method) + + with pytest.raises(TypeError, match="Cannot compare"): + right.get_indexer(left, method=method) + + def test_get_indexer_numeric_vs_bool(self): + left = Index([1, 2, 3]) + right = Index([True, False]) + + res = left.get_indexer(right) + expected = -1 * np.ones(len(right), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = right.get_indexer(left) + expected = -1 * np.ones(len(left), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = left.get_indexer_non_unique(right)[0] + expected = -1 * np.ones(len(right), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = right.get_indexer_non_unique(left)[0] + expected = -1 * np.ones(len(left), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + def test_get_indexer_float64(self): + idx = Index([0.0, 1.0, 2.0], dtype=np.float64) + tm.assert_numpy_array_equal( + idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) + ) + + target = [-0.1, 0.5, 1.1] + tm.assert_numpy_array_equal( + idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) + ) + + def test_get_indexer_nan(self): + # GH#7820 + result = Index([1, 2, np.nan], dtype=np.float64).get_indexer([np.nan]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_int64(self): + index = Index(range(0, 20, 2), dtype=np.int64) + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target) + expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target, method="pad") + expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target, method="backfill") + expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + def 
test_get_indexer_uint64(self, index_large): + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) + indexer = index_large.get_indexer(target) + expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) + indexer = index_large.get_indexer(target, method="pad") + expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) + indexer = index_large.get_indexer(target, method="backfill") + expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + @pytest.mark.parametrize("val, val2", [(4, 5), (4, 4), (4, NA), (NA, NA)]) + def test_get_loc_masked(self, val, val2, any_numeric_ea_and_arrow_dtype): + # GH#39133 + idx = Index([1, 2, 3, val, val2], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(2) + assert result == 1 + + with pytest.raises(KeyError, match="9"): + idx.get_loc(9) + + def test_get_loc_masked_na(self, any_numeric_ea_and_arrow_dtype): + # GH#39133 + idx = Index([1, 2, NA], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(NA) + assert result == 2 + + idx = Index([1, 2, NA, NA], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(NA) + tm.assert_numpy_array_equal(result, np.array([False, False, True, True])) + + idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype) + with pytest.raises(KeyError, match="NA"): + idx.get_loc(NA) + + def test_get_loc_masked_na_and_nan(self): + # GH#39133 + idx = Index( + FloatingArray( + np.array([1, 2, 1, np.nan]), mask=np.array([False, False, True, False]) + ) + ) + result = idx.get_loc(NA) + assert result == 2 + result = idx.get_loc(np.nan) + assert result == 3 + + idx = Index( + FloatingArray(np.array([1, 2, 1.0]), mask=np.array([False, False, True])) + ) + result = idx.get_loc(NA) + assert result == 2 + with pytest.raises(KeyError, match="nan"): + idx.get_loc(np.nan) + + idx = Index( + FloatingArray( + np.array([1, 2, np.nan]), mask=np.array([False, False, False]) + ) + ) + result = idx.get_loc(np.nan) + assert result == 2 + with pytest.raises(KeyError, match="NA"): + idx.get_loc(NA) + + @pytest.mark.parametrize("val", [4, 2]) + def test_get_indexer_masked_na(self, any_numeric_ea_and_arrow_dtype, val): + # GH#39133 + idx = Index([1, 2, NA, 3, val], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_indexer_for([1, NA, 5]) + expected = np.array([0, 2, -1]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize("dtype", ["boolean", "bool[pyarrow]"]) + def test_get_indexer_masked_na_boolean(self, dtype): + # GH#39133 + if dtype == "bool[pyarrow]": + pytest.importorskip("pyarrow") + idx = Index([True, False, NA], dtype=dtype) + result = idx.get_loc(False) + assert result == 1 + result = idx.get_loc(NA) + assert result == 2 + + def test_get_indexer_arrow_dictionary_target(self): + pa = pytest.importorskip("pyarrow") + target = Index( + ArrowExtensionArray( + pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8())) + ) + ) + idx = Index([1]) + + result = idx.get_indexer(target) + expected = np.array([0, -1], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + result_1, result_2 = idx.get_indexer_non_unique(target) + expected_1, expected_2 = np.array([0, -1], dtype=np.int64), np.array( + [1], dtype=np.int64 + ) + 
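# get_indexer_non_unique returns a pair: the indexer into idx for each target + # element (-1 where the value is absent) plus the integer positions of the + # targets that were not found at all, as the two expected arrays show. + 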
tm.assert_numpy_array_equal(result_1, expected_1) + tm.assert_numpy_array_equal(result_2, expected_2) + + +class TestWhere: + @pytest.mark.parametrize( + "index", + [ + Index(np.arange(5, dtype="float64")), + Index(range(0, 20, 2), dtype=np.int64), + Index(np.arange(5, dtype="uint64")), + ], + ) + def test_where(self, listlike_box, index): + cond = [True] * len(index) + expected = index + result = index.where(listlike_box(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * (len(index) - 1) + expected = Index([index._na_value] + index[1:].tolist(), dtype=np.float64) + result = index.where(listlike_box(cond)) + tm.assert_index_equal(result, expected) + + def test_where_uint64(self): + idx = Index([0, 6, 2], dtype=np.uint64) + mask = np.array([False, True, False]) + other = np.array([1], dtype=np.int64) + + expected = Index([1, 6, 1], dtype=np.uint64) + + result = idx.where(mask, other) + tm.assert_index_equal(result, expected) + + result = idx.putmask(~mask, other) + tm.assert_index_equal(result, expected) + + def test_where_infers_type_instead_of_trying_to_convert_string_to_float(self): + # GH 32413 + index = Index([1, np.nan]) + cond = index.notna() + other = Index(["a", "b"], dtype="string") + + expected = Index([1.0, "b"]) + result = index.where(cond, other) + + tm.assert_index_equal(result, expected) + + +class TestTake: + @pytest.mark.parametrize("idx_dtype", [np.float64, np.int64, np.uint64]) + def test_take_preserve_name(self, idx_dtype): + index = Index([1, 2, 3, 4], dtype=idx_dtype, name="foo") + taken = index.take([3, 0, 1]) + assert index.name == taken.name + + def test_take_fill_value_float64(self): + # GH 12631 + idx = Index([1.0, 2.0, 3.0], name="xxx", dtype=np.float64) + result = idx.take(np.array([1, 0, -1])) + expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = Index([2.0, 1.0, np.nan], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64]) + def test_take_fill_value_ints(self, dtype): + # see gh-12631 + idx = Index([1, 2, 3], dtype=dtype, name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = Index([2, 1, 3], dtype=dtype, name="xxx") + tm.assert_index_equal(result, expected) + + name = type(idx).__name__ + msg = f"Unable to fill values because {name} cannot contain NA" + + # fill_value=True + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -1]), fill_value=True) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index([2, 1, 3], dtype=dtype, name="xxx") + tm.assert_index_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + 
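# indices below -1 fail the same way here: an integer index cannot hold NA, + # so take with fill_value=True raises the same ValueError as the -1 case above. + 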
idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + +class TestContains: + @pytest.mark.parametrize("dtype", [np.float64, np.int64, np.uint64]) + def test_contains_none(self, dtype): + # GH#35788 should return False, not raise TypeError + index = Index([0, 1, 2, 3, 4], dtype=dtype) + assert None not in index + + def test_contains_float64_nans(self): + index = Index([1.0, 2.0, np.nan], dtype=np.float64) + assert np.nan in index + + def test_contains_float64_not_nans(self): + index = Index([1.0, 2.0, np.nan], dtype=np.float64) + assert 1.0 in index + + +class TestSliceLocs: + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs(self, dtype): + index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) + n = len(index) + + assert index.slice_locs(start=2) == (2, n) + assert index.slice_locs(start=3) == (3, n) + assert index.slice_locs(3, 8) == (3, 6) + assert index.slice_locs(5, 10) == (3, n) + assert index.slice_locs(end=8) == (0, 6) + assert index.slice_locs(end=9) == (0, 7) + + # reversed + index2 = index[::-1] + assert index2.slice_locs(8, 2) == (2, 6) + assert index2.slice_locs(7, 3) == (2, 5) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs_float_locs(self, dtype): + index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) + n = len(index) + assert index.slice_locs(5.0, 10.0) == (3, n) + assert index.slice_locs(4.5, 10.5) == (3, 8) + + index2 = index[::-1] + assert index2.slice_locs(8.5, 1.5) == (2, 6) + assert index2.slice_locs(10.5, -1) == (0, n) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs_dup_numeric(self, dtype): + index = Index(np.array([10, 12, 12, 14], dtype=dtype)) + assert index.slice_locs(12, 12) == (1, 3) + assert index.slice_locs(11, 13) == (1, 3) + + index2 = index[::-1] + assert index2.slice_locs(12, 12) == (1, 3) + assert index2.slice_locs(13, 11) == (1, 3) + + def test_slice_locs_na(self): + index = Index([np.nan, 1, 2]) + assert index.slice_locs(1) == (1, 3) + assert index.slice_locs(np.nan) == (0, 3) + + index = Index([0, np.nan, np.nan, 1, 2]) + assert index.slice_locs(np.nan) == (1, 5) + + def test_slice_locs_na_raises(self): + index = Index([np.nan, 1, 2]) + with pytest.raises(KeyError, match=""): + index.slice_locs(start=1.5) + + with pytest.raises(KeyError, match=""): + index.slice_locs(end=1.5) + + +class TestGetSliceBounds: + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_within(self, side, expected): + index = Index(range(6)) + result = index.get_slice_bound(4, side=side) + assert result == expected + + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)]) + def test_get_slice_bounds_outside(self, side, expected, bound): + index = Index(range(6)) + result = index.get_slice_bound(bound, side=side) + assert result == expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_join.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..918d5052167356b1d51018434c03e6682f828872 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_join.py @@ -0,0 +1,380 @@ +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.indexes.api 
import Index + + +class TestJoinInt64Index: + def test_join_non_unique(self): + left = Index([4, 4, 3, 3]) + + joined, lidx, ridx = left.join(left, return_indexers=True) + + exp_joined = Index([4, 4, 4, 4, 3, 3, 3, 3]) + tm.assert_index_equal(joined, exp_joined) + + exp_lidx = np.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=np.intp) + tm.assert_numpy_array_equal(lidx, exp_lidx) + + exp_ridx = np.array([0, 1, 0, 1, 2, 3, 2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + def test_join_inner(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Index([2, 12], dtype=np.int64) + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([4, 1], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True) + + res2 = index.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([1, 4], dtype=np.intp) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index + eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) + + assert isinstance(other, Index) and other.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # monotonic + res, lidx, ridx = 
index.join(other_mono, how="right", return_indexers=True) + eres = other_mono + elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) + assert isinstance(other, Index) and other.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([3, 6, 7, 8, 10], dtype=object) + + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") + expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") + expected = Index([6, 8, 10]) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) + + left2 = other.join(index, how="left") + tm.assert_index_equal(left2, other) + + right = index.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) + + def test_join_outer(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25], dtype=np.int64) + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp + ) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True) + noidx_res = index.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp + ) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + +class TestJoinUInt64Index: + @pytest.fixture + def index_large(self): + # large values used in TestUInt64Index where no compat needed with int64/float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + def test_join_inner(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, 
how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Index(2**63 + np.array([10, 25], dtype="uint64")) + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([5, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="inner", return_indexers=True + ) + + res2 = index_large.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([3, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="left", return_indexers=True) + eres = index_large + eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) + + tm.assert_numpy_array_equal(lidx, elidx) + assert isinstance(other, Index) and other.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert ridx is None + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="right", return_indexers=True + ) + eres = other_mono + elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) + + assert isinstance(other, Index) and other.dtype == np.uint64 + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_index_equal(res, eres) + assert ridx is None + + # non-unique + idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = Index(2**63 + np.array([1, 1, 2, 5, 
7, 9], dtype="uint64")) + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self, index_large): + other = Index( + 2**63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object + ) + + outer = index_large.join(other, how="outer") + outer2 = other.join(index_large, how="outer") + expected = Index( + 2**63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64") + ) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index_large.join(other, how="inner") + inner2 = other.join(index_large, how="inner") + expected = Index(2**63 + np.array([10, 20], dtype="uint64")) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index_large.join(other, how="left") + tm.assert_index_equal(left, index_large.astype(object)) + + left2 = other.join(index_large, how="left") + tm.assert_index_equal(left2, other) + + right = index_large.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index_large, how="right") + tm.assert_index_equal(right2, index_large.astype(object)) + + def test_join_outer(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True) + noidx_res = index_large.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Index( + 2**63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64") + ) + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="outer", return_indexers=True + ) + noidx_res = index_large.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..4fd807e1827ddc4faf900f15dcefa18c08d4cd0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py @@ -0,0 +1,553 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +class TestFloatNumericIndex: + @pytest.fixture(params=[np.float64, np.float32]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + values = np.arange(5, dtype=dtype) + return Index(values) + + @pytest.fixture( + params=[ + [1.5, 2, 3, 4, 5], + [0.0, 2.5, 5.0, 7.5, 10.0], + [5, 4, 3, 2, 1.5], + [10.0, 7.5, 5.0, 
2.5, 0.0], + ], + ids=["mixed", "float", "mixed_dec", "float_dec"], + ) + def index(self, request, dtype): + return Index(request.param, dtype=dtype) + + @pytest.fixture + def mixed_index(self, dtype): + return Index([1.5, 2, 3, 4, 5], dtype=dtype) + + @pytest.fixture + def float_index(self, dtype): + return Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=dtype) + + def test_repr_roundtrip(self, index): + tm.assert_index_equal(eval(repr(index)), index, exact=True) + + def check_coerce(self, a, b, is_float_index=True): + assert a.equals(b) + tm.assert_index_equal(a, b, exact=False) + if is_float_index: + assert isinstance(b, Index) + else: + assert type(b) is Index + + def test_constructor_from_list_no_dtype(self): + index = Index([1.5, 2.5, 3.5]) + assert index.dtype == np.float64 + + def test_constructor(self, dtype): + index_cls = Index + + # explicit construction + index = index_cls([1, 2, 3, 4, 5], dtype=dtype) + + assert isinstance(index, index_cls) + assert index.dtype == dtype + + expected = np.array([1, 2, 3, 4, 5], dtype=dtype) + tm.assert_numpy_array_equal(index.values, expected) + + index = index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + # nan handling + result = index_cls([np.nan, np.nan], dtype=dtype) + assert pd.isna(result.values).all() + + result = index_cls(np.array([np.nan]), dtype=dtype) + assert pd.isna(result.values).all() + + def test_constructor_invalid(self): + index_cls = Index + cls_name = index_cls.__name__ + # invalid + msg = ( + rf"{cls_name}\(\.\.\.\) must be called with a collection of " + r"some kind, 0\.0 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(0.0) + + def test_constructor_coerce(self, mixed_index, float_index): + self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) + self.check_coerce(float_index, Index(np.arange(5) * 2.5)) + + result = Index(np.array(np.arange(5) * 2.5, dtype=object)) + assert result.dtype == object # as of 2.0 to match Series + self.check_coerce(float_index, result.astype("float64")) + + def test_constructor_explicit(self, mixed_index, float_index): + # these don't auto convert + self.check_coerce( + float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False + ) + self.check_coerce( + mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False + ) + + def test_type_coercion_fail(self, any_int_numpy_dtype): + # see gh-15832 + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Index([1, 2, 3.5], dtype=any_int_numpy_dtype) + + def test_equals_numeric(self): + index_cls = Index + + idx = index_cls([1.0, 2.0]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, 2.0]) + assert idx.equals(idx2) + + idx = index_cls([1.0, np.nan]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, np.nan]) + assert idx.equals(idx2) + + @pytest.mark.parametrize( + "other", + ( + Index([1, 2], dtype=np.int64), + Index([1.0, 2.0], 
dtype=object), + Index([1, 2], dtype=object), + ), + ) + def test_equals_numeric_other_index_type(self, other): + idx = Index([1.0, 2.0]) + assert idx.equals(other) + assert other.equals(idx) + + @pytest.mark.parametrize( + "vals", + [ + pd.date_range("2016-01-01", periods=3), + pd.timedelta_range("1 Day", periods=3), + ], + ) + def test_lookups_datetimelike_values(self, vals, dtype): + # If we have datetime64 or timedelta64 values, make sure they are + # wrapped correctly GH#31163 + ser = Series(vals, index=range(3, 6)) + ser.index = ser.index.astype(dtype) + + expected = vals[1] + + result = ser[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.loc[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser.loc[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.at[4.0] + assert isinstance(result, type(expected)) and result == expected + # GH#31329 .at[4] should cast to 4.0, matching .loc behavior + result = ser.at[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iloc[1] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iat[1] + assert isinstance(result, type(expected)) and result == expected + + def test_doesnt_contain_all_the_things(self): + idx = Index([np.nan]) + assert not idx.isin([0]).item() + assert not idx.isin([1]).item() + assert idx.isin([np.nan]).item() + + def test_nan_multiple_containment(self): + index_cls = Index + + idx = index_cls([1.0, np.nan]) + tm.assert_numpy_array_equal(idx.isin([1.0]), np.array([True, False])) + tm.assert_numpy_array_equal(idx.isin([2.0, np.pi]), np.array([False, False])) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, True])) + tm.assert_numpy_array_equal(idx.isin([1.0, np.nan]), np.array([True, True])) + idx = index_cls([1.0, 2.0]) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, False])) + + def test_fillna_float64(self): + index_cls = Index + # GH 11343 + idx = Index([1.0, np.nan, 3.0], dtype=float, name="x") + # can't downcast + exp = Index([1.0, 0.1, 3.0], name="x") + tm.assert_index_equal(idx.fillna(0.1), exp, exact=True) + + # downcast + exp = index_cls([1.0, 2.0, 3.0], name="x") + tm.assert_index_equal(idx.fillna(2), exp) + + # object + exp = Index([1.0, "obj", 3.0], name="x") + tm.assert_index_equal(idx.fillna("obj"), exp, exact=True) + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + assert idx.all() == idx.to_series().all() + assert idx.any() == idx.to_series().any() + + +class TestNumericInt: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8, np.uint64]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + return Index(range(0, 20, 2), dtype=dtype) + + def test_is_monotonic(self): + index_cls = Index + + index = index_cls([1, 2, 3, 4]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([4, 3, 2, 1]) + assert index.is_monotonic_increasing is False + assert index._is_strictly_monotonic_increasing is False + assert index._is_strictly_monotonic_decreasing is 
True + + index = index_cls([1]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is True + + def test_is_strictly_monotonic(self): + index_cls = Index + + index = index_cls([1, 1, 2, 3]) + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is False + + index = index_cls([3, 2, 1, 1]) + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([1, 1]) + assert index.is_monotonic_increasing + assert index.is_monotonic_decreasing + assert not index._is_strictly_monotonic_increasing + assert not index._is_strictly_monotonic_decreasing + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + def test_identical(self, simple_index, dtype): + index = simple_index + + idx = Index(index.copy()) + assert idx.identical(index) + + same_values_different_type = Index(idx, dtype=object) + assert not idx.identical(same_values_different_type) + + idx = index.astype(dtype=object) + idx = idx.rename("foo") + same_values = Index(idx, dtype=object) + assert same_values.identical(idx) + + assert not idx.identical(index) + assert Index(same_values, name="foo", dtype=object).identical(idx) + + assert not index.astype(dtype=object).identical(index.astype(dtype=dtype)) + + def test_cant_or_shouldnt_cast(self, dtype): + msg = r"invalid literal for int\(\) with base 10: 'foo'" + + # can't + data = ["foo", "bar", "baz"] + with pytest.raises(ValueError, match=msg): + Index(data, dtype=dtype) + + def test_view_index(self, simple_index): + index = simple_index + msg = "Passing a type in .*Index.view is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index.view(Index) + + def test_prevent_casting(self, simple_index): + index = simple_index + result = index.astype("O") + assert result.dtype == np.object_ + + +class TestIntNumericIndex: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8]) + def dtype(self, request): + return request.param + + def test_constructor_from_list_no_dtype(self): + index = Index([1, 2, 3]) + assert index.dtype == np.int64 + + def test_constructor(self, dtype): + index_cls = Index + + # scalar raise Exception + msg = ( + rf"{index_cls.__name__}\(\.\.\.\) must be called with a collection of some " + "kind, 5 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(5) + + # copy + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + arr = index.values.copy() + new_index = index_cls(arr, copy=True) + tm.assert_index_equal(new_index, index, exact=True) + val = int(arr[0]) + 3000 + + # this should not change index + if dtype != np.int8: + # NEP 50 won't allow assignment that would overflow + arr[0] = val + assert new_index[0] != val + + if dtype == np.int64: + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + expected = Index([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected) + + # from iterable + index = index_cls(iter([-5, 0, 1, 2]), dtype=dtype) + expected = index_cls([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected, exact=True) + + # interpret list-like + expected = index_cls([5, 0], dtype=dtype) + for cls in [Index, index_cls]: + for idx in [ + cls([5, 0], 
dtype=dtype), + cls(np.array([5, 0]), dtype=dtype), + cls(Series([5, 0]), dtype=dtype), + ]: + tm.assert_index_equal(idx, expected) + + def test_constructor_corner(self, dtype): + index_cls = Index + + arr = np.array([1, 2, 3, 4], dtype=object) + + index = index_cls(arr, dtype=dtype) + assert index.values.dtype == index.dtype + if dtype == np.int64: + without_dtype = Index(arr) + # as of 2.0 we do not infer a dtype when we get an object-dtype + # ndarray of numbers, matching Series behavior + assert without_dtype.dtype == object + + tm.assert_index_equal(index, without_dtype.astype(np.int64)) + + # preventing casting + arr = np.array([1, "2", 3, "4"], dtype=object) + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + index_cls(arr, dtype=dtype) + + def test_constructor_coercion_signed_to_unsigned( + self, + any_unsigned_int_numpy_dtype, + ): + # see gh-15832 + msg = "|".join( + [ + "Trying to coerce negative values to unsigned integers", + "The elements provided in the data cannot all be casted", + ] + ) + with pytest.raises(OverflowError, match=msg): + Index([-1], dtype=any_unsigned_int_numpy_dtype) + + def test_constructor_np_signed(self, any_signed_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_signed_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_signed_int_numpy_dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_constructor_np_unsigned(self, any_unsigned_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_unsigned_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_unsigned_int_numpy_dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_coerce_list(self): + # coerce things + arr = Index([1, 2, 3, 4]) + assert isinstance(arr, Index) + + # but not if explicit dtype passed + arr = Index([1, 2, 3, 4], dtype=object) + assert type(arr) is Index + + +class TestFloat16Index: + # float 16 indexes not supported + # GH 49535 + def test_constructor(self): + index_cls = Index + dtype = np.float16 + + msg = "float16 indexes are not supported" + + # explicit construction + with pytest.raises(NotImplementedError, match=msg): + index_cls([1, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + # nan handling + with pytest.raises(NotImplementedError, match=msg): + index_cls([np.nan, np.nan], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([np.nan]), dtype=dtype) + + +@pytest.mark.parametrize( + "box", + [list, lambda x: np.array(x, dtype=object), lambda x: Index(x, dtype=object)], +) +def test_uint_index_does_not_convert_to_float64(box): + # https://github.com/pandas-dev/pandas/issues/28279 + # https://github.com/pandas-dev/pandas/issues/28023 + series = Series( + [0, 1, 2, 3, 4, 5], + index=[ + 7606741985629028552, + 17876870360202815256, + 17876870360202815256, + 13106359306506049338, + 8991270399732411471, + 8991270399732411472, + ], + ) + + result = series.loc[box([7606741985629028552, 
17876870360202815256])] + + expected = Index( + [7606741985629028552, 17876870360202815256, 17876870360202815256], + dtype="uint64", + ) + tm.assert_index_equal(result.index, expected) + + tm.assert_equal(result, series.iloc[:3]) + + +def test_float64_index_equals(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + + result = float_index.equals(string_index) + assert result is False + + result = string_index.equals(float_index) + assert result is False + + +def test_map_dtype_inference_unsigned_to_signed(): + # GH#44609 cases where we don't retain dtype + idx = Index([1, 2, 3], dtype=np.uint64) + result = idx.map(lambda x: -x) + expected = Index([-1, -2, -3], dtype=np.int64) + tm.assert_index_equal(result, expected) + + +def test_map_dtype_inference_overflows(): + # GH#44609 case where we have to upcast + idx = Index(np.array([1, 2, 3], dtype=np.int8)) + result = idx.map(lambda x: x * 1000) + # TODO: we could plausibly try to infer down to int16 here + expected = Index([1000, 2000, 3000], dtype=np.int64) + tm.assert_index_equal(result, expected) + + +def test_view_to_datetimelike(): + # GH#55710 + idx = Index([1, 2, 3]) + res = idx.view("m8[s]") + expected = pd.TimedeltaIndex(idx.values.view("m8[s]")) + tm.assert_index_equal(res, expected) + + res2 = idx.view("m8[D]") + expected2 = idx.values.view("m8[D]") + tm.assert_numpy_array_equal(res2, expected2) + + res3 = idx.view("M8[h]") + expected3 = idx.values.view("M8[h]") + tm.assert_numpy_array_equal(res3, expected3) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_setops.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..376b51dd98bb1b1c7c6c8a67914bc72f6c6c588d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_setops.py @@ -0,0 +1,168 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.indexes.api import ( + Index, + RangeIndex, +) + + +@pytest.fixture +def index_large(): + # large values used in TestUInt64Index where no compat needed with int64/float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + +class TestSetOps: + @pytest.mark.parametrize("dtype", ["f8", "u8", "i8"]) + def test_union_non_numeric(self, dtype): + # corner case, non-numeric + index = Index(np.arange(5, dtype=dtype), dtype=dtype) + assert index.dtype == dtype + + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other) + expected = Index(np.concatenate((index, other))) + tm.assert_index_equal(result, expected) + + result = other.union(index) + expected = Index(np.concatenate((other, index))) + tm.assert_index_equal(result, expected) + + def test_intersection(self): + index = Index(range(5), dtype=np.int64) + + other = Index([1, 2, 3, 4, 5]) + result = index.intersection(other) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index) + expected = Index( + np.sort(np.asarray(np.intersect1d(index.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "uint64"]) + def test_int_float_union_dtype(self, dtype): + # 
https://github.com/pandas-dev/pandas/issues/26778 + # [u]int | float -> float + index = Index([0, 2, 3], dtype=dtype) + other = Index([0.5, 1.5], dtype=np.float64) + expected = Index([0.0, 0.5, 1.5, 2.0, 3.0], dtype=np.float64) + result = index.union(other) + tm.assert_index_equal(result, expected) + + result = other.union(index) + tm.assert_index_equal(result, expected) + + def test_range_float_union_dtype(self): + # https://github.com/pandas-dev/pandas/issues/26778 + index = RangeIndex(start=0, stop=3) + other = Index([0.5, 1.5], dtype=np.float64) + result = index.union(other) + expected = Index([0.0, 0.5, 1, 1.5, 2.0], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = other.union(index) + tm.assert_index_equal(result, expected) + + def test_range_uint64_union_dtype(self): + # https://github.com/pandas-dev/pandas/issues/26778 + index = RangeIndex(start=0, stop=3) + other = Index([0, 10], dtype=np.uint64) + result = index.union(other) + expected = Index([0, 1, 2, 10], dtype=object) + tm.assert_index_equal(result, expected) + + result = other.union(index) + tm.assert_index_equal(result, expected) + + def test_float64_index_difference(self): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + + result = float_index.difference(string_index) + tm.assert_index_equal(result, float_index) + + result = string_index.difference(float_index) + tm.assert_index_equal(result, string_index) + + def test_intersection_uint64_outside_int64_range(self, index_large): + other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20]) + result = index_large.intersection(other) + expected = Index(np.sort(np.intersect1d(index_large.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index_large) + expected = Index( + np.sort(np.asarray(np.intersect1d(index_large.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "index2,keeps_name", + [ + (Index([4, 7, 6, 5, 3], name="index"), True), + (Index([4, 7, 6, 5, 3], name="other"), False), + ], + ) + def test_intersection_monotonic(self, index2, keeps_name, sort): + index1 = Index([5, 3, 2, 4, 1], name="index") + expected = Index([5, 3, 4]) + + if keeps_name: + expected.name = "index" + + result = index1.intersection(index2, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + def test_symmetric_difference(self, sort): + # smoke + index1 = Index([5, 2, 3, 4], name="index1") + index2 = Index([2, 3, 4, 1]) + result = index1.symmetric_difference(index2, sort=sort) + expected = Index([5, 1]) + if sort is not None: + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result, expected.sort_values()) + assert result.name is None + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + +class TestSetOpsSort: + @pytest.mark.parametrize("slice_", [slice(None), slice(0)]) + def test_union_sort_other_special(self, slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + + idx = Index([1, 0, 2]) + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + @pytest.mark.parametrize("slice_", [slice(None), slice(0)]) + def test_union_sort_special_true(self, slice_): + idx = Index([1, 0, 2]) + # 
default, sort=None + other = idx[slice_] + + result = idx.union(other, sort=True) + expected = Index([0, 1, 2]) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.py b/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a5d2ca92b5248ce798a19f8e14c3492992cae1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.py @@ -0,0 +1,9 @@ +# Re-export this +from ._safetensors_rust import ( # noqa: F401 + SafetensorError, + __version__, + deserialize, + safe_open, + serialize, + serialize_file, +) diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a125e8de683c424db86d8edd9a301ced30d56296 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/__init__.pyi @@ -0,0 +1,73 @@ +# Generated content DO NOT EDIT +@staticmethod +def deserialize(bytes): + """ + Opens a safetensors file lazily and returns tensors as asked + + Args: + data (:obj:`bytes`): + The byte content of a file + + Returns: + (:obj:`List[str, Dict[str, Dict[str, any]]]`): + The deserialized content is like: + [("tensor_name", {"shape": [2, 3], "dtype": "F32", "data": b"\0\0.." }), (...)] + """ + pass + +@staticmethod +def serialize(tensor_dict, metadata=None): + """ + Serializes raw data. + + Args: + tensor_dict (:obj:`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + metadata (:obj:`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (:obj:`bytes`): + The serialized content. + """ + pass
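The `serialize`/`deserialize` pair above round-trips raw tensor bytes without any framework dependency. A minimal sketch of that round trip, using the flattened dict layout shown in the docstrings (the tensor name and values are illustrative, not part of the API):

```python
import numpy as np
from safetensors import deserialize, serialize

# Flattened layout from the docstrings: dtype string, shape, raw little-endian bytes.
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
flat = {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": arr.tobytes()}}

blob = bytes(serialize(flat))           # header + raw buffers
for name, info in deserialize(blob):    # yields (name, {"shape", "dtype", "data"}) pairs
    restored = np.frombuffer(info["data"], dtype=np.float32).reshape(info["shape"])
    assert np.array_equal(restored, arr)
```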
+ +@staticmethod +def serialize_file(tensor_dict, filename, metadata=None): + """ + Serializes raw data into a file. + + Args: + tensor_dict (:obj:`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + filename (:obj:`str`): + The name of the file to write into. + metadata (:obj:`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (:obj:`None`) + """ + pass + +class safe_open: + """ + Opens a safetensors file lazily and returns tensors as asked + + Args: + filename (:obj:`str`): + The filename to open + + framework (:obj:`str`): + The framework you want your tensors in. Supported values: + `pt`, `tf`, `flax`, `numpy`. + + device (:obj:`str`, defaults to :obj:`"cpu"`): + The device on which you want the tensors. + """ + + def __init__(self, filename, framework, device="cpu"): + pass diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/flax.py b/llmeval-env/lib/python3.10/site-packages/safetensors/flax.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b8375e038eff487af33fcfaa4a597aacb5743f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/flax.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import jax.numpy as jnp +from jax import Array +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.flax import save + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, Array], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.flax import save_file + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` on cpu + + Example: + + ```python + from safetensors.flax import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2jnp(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format. 
+ + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` + + Example: + + ```python + from safetensors.flax import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="flax") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]: + for k, v in numpy_dict.items(): + numpy_dict[k] = jnp.array(v) + return numpy_dict + + +def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]: + for k, v in jnp_dict.items(): + jnp_dict[k] = np.asarray(v) + return jnp_dict diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/numpy.py b/llmeval-env/lib/python3.10/site-packages/safetensors/numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..0b245f12c1c949456c9b2edb45a11343e6a8099a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/numpy.py @@ -0,0 +1,176 @@ +import os +import sys +from typing import Dict, Optional, Union + +import numpy as np + +from safetensors import deserialize, safe_open, serialize, serialize_file + + +def _tobytes(tensor: np.ndarray) -> bytes: + if not _is_little_endian(tensor): + tensor = tensor.byteswap(inplace=False) + return tensor.tobytes() + + +def save(tensor_dict: Dict[str, np.ndarray], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.numpy import save + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialized = serialize(flattened, metadata=metadata) + result = bytes(serialized) + return result + + +def save_file( + tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]] = None +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `None` + + Example: + + ```python + from safetensors.numpy import save_file + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialize_file(flattened, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` on cpu + + Example: + + ```python + from safetensors.numpy import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = deserialize(data) + return _view2np(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` + + Example: + + ```python + from safetensors.numpy import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="np") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +_TYPES = { + "F64": np.float64, + "F32": np.float32, + "F16": np.float16, + "I64": np.int64, + "U64": np.uint64, + "I32": np.int32, + "U32": np.uint32, + "I16": np.int16, + "U16": np.uint16, + "I8": np.int8, + "U8": np.uint8, + "BOOL": bool, +} + + +def _getdtype(dtype_str: str) -> np.dtype: + return _TYPES[dtype_str] + + +def _view2np(safeview) -> Dict[str, np.ndarray]: + result = {} + for k, v in safeview: + dtype = _getdtype(v["dtype"]) + arr = np.frombuffer(v["data"], dtype=dtype).reshape(v["shape"]) + result[k] = arr + return result + + +def _is_little_endian(tensor: np.ndarray) -> bool: + byteorder = tensor.dtype.byteorder + if byteorder == "=": + if sys.byteorder == "little": + return True + else: + return False + elif byteorder == "|": + return True + elif byteorder == "<": + return True + elif byteorder == ">": + return False + raise ValueError(f"Unexpected byte order {byteorder}") diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/paddle.py b/llmeval-env/lib/python3.10/site-packages/safetensors/paddle.py new file mode 100644 index 0000000000000000000000000000000000000000..cec368665de31d17757c0c6621df5dc4926bfab1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/paddle.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import paddle +from safetensors import numpy + + +def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.paddle import save + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, paddle.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.paddle import save_file + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes, device: str = "cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` on cpu + + Example: + + ```python + from safetensors.paddle import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2paddle(flat, device) + + +def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + device (`Union[Dict[str, any], str]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. 
+ available options are all regular paddle device locations + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` + + Example: + + ```python + from safetensors.paddle import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + flat = numpy.load_file(filename) + output = _np2paddle(flat, device) + return output + + +def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str = "cpu") -> Dict[str, paddle.Tensor]: + for k, v in numpy_dict.items(): + numpy_dict[k] = paddle.to_tensor(v, place=device) + return numpy_dict + + +def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]: + for k, v in paddle_dict.items(): + paddle_dict[k] = v.detach().cpu().numpy() + return paddle_dict diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/py.typed b/llmeval-env/lib/python3.10/site-packages/safetensors/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/tensorflow.py b/llmeval-env/lib/python3.10/site-packages/safetensors/tensorflow.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d74b0522698b3748a7da93753e065f4053beea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/tensorflow.py @@ -0,0 +1,137 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np +import tensorflow as tf + +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, tf.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.tensorflow import save + import tensorflow as tf + + tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _tf2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, tf.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, tf.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `None` + + Example: + + ```python + from safetensors.tensorflow import save_file + import tensorflow as tf + + tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _tf2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, tf.Tensor]: + """ + Loads a safetensors file into tensorflow format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` on cpu + + Example: + + ```python + from safetensors.tensorflow import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2tf(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]: + """ + Loads a safetensors file into tensorflow format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` + + Example: + + ```python + from safetensors.tensorflow import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="tf") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]: + for k, v in numpy_dict.items(): + numpy_dict[k] = tf.convert_to_tensor(v) + return numpy_dict + + +def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.array]: + for k, v in tf_dict.items(): + tf_dict[k] = v.numpy() + return tf_dict diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/torch.py b/llmeval-env/lib/python3.10/site-packages/safetensors/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..5d98bac4a8055a17d27e16caa5da816c7c9bb496 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/torch.py @@ -0,0 +1,495 @@ +import os +import sys +from collections import defaultdict +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import torch + +from safetensors import deserialize, safe_open, serialize, serialize_file + + +def storage_ptr(tensor: torch.Tensor) -> int: + try: + return tensor.untyped_storage().data_ptr() + except Exception: + # Fallback for torch==1.10 + try: + return tensor.storage().data_ptr() + except NotImplementedError: + # Fallback for meta storage + return 0 + + +def _end_ptr(tensor: torch.Tensor) -> int: + if tensor.nelement(): + stop = tensor.view(-1)[-1].data_ptr() + _SIZE[tensor.dtype] + else: + stop = tensor.data_ptr() + return stop + + +def storage_size(tensor: torch.Tensor) -> int: + try: + return tensor.untyped_storage().nbytes() + except AttributeError: + # Fallback for torch==1.10 + try: + return tensor.storage().size() * _SIZE[tensor.dtype] + except NotImplementedError: + # Fallback for meta storage + # On torch >=2.0 this is the tensor size + return tensor.nelement() * _SIZE[tensor.dtype] + + +def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]: + filtered_tensors = [] + for shared in tensors: + if len(shared) < 2: + filtered_tensors.append(shared) + continue + + areas = [] + for name in shared: + tensor = state_dict[name] + 
areas.append((tensor.data_ptr(), _end_ptr(tensor), name)) + areas.sort() + + _, last_stop, last_name = areas[0] + filtered_tensors.append({last_name}) + for start, stop, name in areas[1:]: + if start >= last_stop: + filtered_tensors.append({name}) + else: + filtered_tensors[-1].add(name) + last_stop = stop + + return filtered_tensors + + +def _find_shared_tensors(state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]: + tensors = defaultdict(set) + for k, v in state_dict.items(): + if v.device != torch.device("meta") and storage_ptr(v) != 0 and storage_size(v) != 0: + # Need to add device as key because of multiple GPU. + tensors[(v.device, storage_ptr(v), storage_size(v))].add(k) + tensors = list(sorted(tensors.values())) + tensors = _filter_shared_not_shared(tensors, state_dict) + return tensors + + +def _is_complete(tensor: torch.Tensor) -> bool: + return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _SIZE[tensor.dtype] == storage_size(tensor) + + +def _remove_duplicate_names( + state_dict: Dict[str, torch.Tensor], + *, + preferred_names: Optional[List[str]] = None, + discard_names: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + if preferred_names is None: + preferred_names = [] + preferred_names = set(preferred_names) + if discard_names is None: + discard_names = [] + discard_names = set(discard_names) + + shareds = _find_shared_tensors(state_dict) + to_remove = defaultdict(list) + for shared in shareds: + complete_names = set([name for name in shared if _is_complete(state_dict[name])]) + if not complete_names: + raise RuntimeError( + "Error while trying to find names to remove to save state dict, but found no suitable name to keep" + f" for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model" + " since you could be storing much more memory than needed. Please refer to" + " https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an" + " issue." + ) + + keep_name = sorted(list(complete_names))[0] + + # Mechanism to preferentially select keys to keep + # coming from the on-disk file to allow + # loading models saved with a different choice + # of keep_name + preferred = complete_names.difference(discard_names) + if preferred: + keep_name = sorted(list(preferred))[0] + + if preferred_names: + preferred = preferred_names.intersection(complete_names) + if preferred: + keep_name = sorted(list(preferred))[0] + for name in sorted(shared): + if name != keep_name: + to_remove[keep_name].append(name) + return to_remove + + +def save_model( + model: torch.nn.Module, filename: str, metadata: Optional[Dict[str, str]] = None, force_contiguous: bool = True +): + """ + Saves a given torch model to specified filename. + This method exists specifically to avoid tensor sharing issues which are + not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors) + + Args: + model (`torch.nn.Module`): + The model to save on disk. + filename (`str`): + The filename location to save the file + metadata (`Dict[str, str]`, *optional*): + Extra information to save along with the file. + Some metadata will be added for each dropped tensors. + This information will not be enough to recover the entire + shared structure but might help understanding things + force_contiguous (`boolean`, *optional*, defaults to True): + Forcing the state_dict to be saved as contiguous tensors. 
+ This has no effect on the correctness of the model, but it + could potentially change performance if the layout of the tensor + was chosen specifically for that reason. + """ + state_dict = model.state_dict() + to_removes = _remove_duplicate_names(state_dict) + + for kept_name, to_remove_group in to_removes.items(): + for to_remove in to_remove_group: + if metadata is None: + metadata = {} + + if to_remove not in metadata: + # Do not override user data + metadata[to_remove] = kept_name + del state_dict[to_remove] + if force_contiguous: + state_dict = {k: v.contiguous() for k, v in state_dict.items()} + try: + save_file(state_dict, filename, metadata=metadata) + except ValueError as e: + msg = str(e) + msg += " Or use save_model(..., force_contiguous=True), read the docs for potential caveats." + raise ValueError(msg) + + +def load_model(model: torch.nn.Module, filename: Union[str, os.PathLike], strict: bool = True, device: Union[str, int] = "cpu") -> Tuple[List[str], List[str]]: + """ + Loads a given filename onto a torch model. + This method exists specifically to avoid tensor sharing issues which are + not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors) + + Args: + model (`torch.nn.Module`): + The model to load onto. + filename (`str`, or `os.PathLike`): + The filename location to load the file from. + strict (`bool`, *optional*, defaults to True): + Whether to fail if you're missing keys or having unexpected ones. + When false, the function simply returns missing and unexpected names. + device (`Union[str, int]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. + available options are all regular torch device locations. + + Returns: + `(missing, unexpected): (List[str], List[str])` + `missing` are names in the model which were not modified during loading + `unexpected` are names that are on the file, but weren't used during + the load. + """ + state_dict = load_file(filename, device=device) + model_state_dict = model.state_dict() + to_removes = _remove_duplicate_names(model_state_dict, preferred_names=state_dict.keys()) + missing, unexpected = model.load_state_dict(state_dict, strict=False) + missing = set(missing) + for to_remove_group in to_removes.values(): + for to_remove in to_remove_group: + if to_remove not in missing: + unexpected.append(to_remove) + else: + missing.remove(to_remove) + if strict and (missing or unexpected): + missing_keys = ", ".join([f'"{k}"' for k in sorted(missing)]) + unexpected_keys = ", ".join([f'"{k}"' for k in sorted(unexpected)]) + error = f"Error(s) in loading state_dict for {model.__class__.__name__}:" + if missing: + error += f"\n Missing key(s) in state_dict: {missing_keys}" + if unexpected: + error += f"\n Unexpected key(s) in state_dict: {unexpected_keys}" + raise RuntimeError(error) + return missing, unexpected + + +def save(tensors: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, torch.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.torch import save + import torch + + tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + serialized = serialize(_flatten(tensors), metadata=metadata) + result = bytes(serialized) + return result + + +def save_file( + tensors: Dict[str, torch.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +): + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, torch.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.torch import save_file + import torch + + tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + serialize_file(_flatten(tensors), filename, metadata=metadata) + + +def load_file(filename: Union[str, os.PathLike], device: Union[str, int] = "cpu") -> Dict[str, torch.Tensor]: + """ + Loads a safetensors file into torch format. + + Args: + filename (`str`, or `os.PathLike`): + The name of the file which contains the tensors + device (`Union[str, int]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. + available options are all regular torch device locations. + + Returns: + `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor` + + Example: + + ```python + from safetensors.torch import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="pt", device=device) as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def load(data: bytes) -> Dict[str, torch.Tensor]: + """ + Loads a safetensors file into torch format from pure bytes. 
+ + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor` on cpu + + Example: + + ```python + from safetensors.torch import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = deserialize(data) + return _view2torch(flat) + +# torch.float8 formats require 2.1; we do not support these dtypes on earlier versions +_float8_e4m3fn = getattr(torch, "float8_e4m3fn", None) +_float8_e5m2 = getattr(torch, "float8_e5m2", None) + +_SIZE = { + torch.int64: 8, + torch.float32: 4, + torch.int32: 4, + torch.bfloat16: 2, + torch.float16: 2, + torch.int16: 2, + torch.uint8: 1, + torch.int8: 1, + torch.bool: 1, + torch.float64: 8, + _float8_e4m3fn: 1, + _float8_e5m2: 1, +} + +_TYPES = { + "F64": torch.float64, + "F32": torch.float32, + "F16": torch.float16, + "BF16": torch.bfloat16, + "I64": torch.int64, + # "U64": torch.uint64, + "I32": torch.int32, + # "U32": torch.uint32, + "I16": torch.int16, + # "U16": torch.uint16, + "I8": torch.int8, + "U8": torch.uint8, + "BOOL": torch.bool, + "F8_E4M3": _float8_e4m3fn, + "F8_E5M2": _float8_e5m2, +} + + +def _getdtype(dtype_str: str) -> torch.dtype: + return _TYPES[dtype_str] + + +def _view2torch(safeview) -> Dict[str, torch.Tensor]: + result = {} + for k, v in safeview: + dtype = _getdtype(v["dtype"]) + arr = torch.frombuffer(v["data"], dtype=dtype).reshape(v["shape"]) + if sys.byteorder == "big": + arr = torch.from_numpy(arr.numpy().byteswap(inplace=False)) + result[k] = arr + + return result + + +def _tobytes(tensor: torch.Tensor, name: str) -> bytes: + if tensor.layout != torch.strided: + raise ValueError( + f"You are trying to save a sparse tensor: `{name}` which this library does not support." + " You can make it a dense tensor before saving with `.to_dense()` but be aware this might" + " make a much larger file than needed." + ) + + if not tensor.is_contiguous(): + raise ValueError( + f"You are trying to save a non contiguous tensor: `{name}` which is not allowed. It either means you" + " are trying to save tensors which are reference of each other in which case it's recommended to save" + " only the full tensors, and reslice at load time, or simply call `.contiguous()` on your tensor to" + " pack it before saving." 
+ ) + if tensor.device.type != "cpu": + # Moving tensor to cpu before saving + tensor = tensor.to("cpu") + + import ctypes + + import numpy as np + + # When shape is empty (scalar), np.prod returns a float + # we need an int for the following calculations + length = int(np.prod(tensor.shape).item()) + bytes_per_item = _SIZE[tensor.dtype] + + total_bytes = length * bytes_per_item + + ptr = tensor.data_ptr() + if ptr == 0: + return b"" + newptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_ubyte)) + data = np.ctypeslib.as_array(newptr, (total_bytes,)) # no internal copy + if sys.byteorder == "big": + NPDTYPES = { + torch.int64: np.int64, + torch.float32: np.float32, + torch.int32: np.int32, + # XXX: This is ok because both have the same width + torch.bfloat16: np.float16, + torch.float16: np.float16, + torch.int16: np.int16, + torch.uint8: np.uint8, + torch.int8: np.int8, + torch.bool: bool, + torch.float64: np.float64, + # XXX: This is ok because both have the same width and byteswap is a no-op anyway + _float8_e4m3fn: np.uint8, + _float8_e5m2: np.uint8, + } + npdtype = NPDTYPES[tensor.dtype] + # Not in place as that would potentially modify a live running model + data = data.view(npdtype).byteswap(inplace=False) + return data.tobytes() + + +def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]: + if not isinstance(tensors, dict): + raise ValueError(f"Expected a dict of [str, torch.Tensor] but received {type(tensors)}") + + invalid_tensors = [] + for k, v in tensors.items(): + if not isinstance(v, torch.Tensor): + raise ValueError(f"Key `{k}` is invalid, expected torch.Tensor but received {type(v)}") + + if v.layout != torch.strided: + invalid_tensors.append(k) + if invalid_tensors: + raise ValueError( + f"You are trying to save sparse tensors: `{invalid_tensors}` which this library does not support." + " You can make them dense tensors before saving with `.to_dense()` but be aware this might" + " make a much larger file than needed." + ) + + shared_pointers = _find_shared_tensors(tensors) + failing = [] + for names in shared_pointers: + if len(names) > 1: + failing.append(names) + + if failing: + raise RuntimeError( + f""" + Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}. + A potential way to correctly save your model is to use `save_model`. + More information at https://huggingface.co/docs/safetensors/torch_shared_tensors + """ + ) + + return { + k: { + "dtype": str(v.dtype).split(".")[-1], + "shape": v.shape, + "data": _tobytes(v, k), + } + for k, v in tensors.items() + }
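`safetensors.torch` refuses to serialize aliased storage: `_flatten` above raises once `_find_shared_tensors` reports a group with more than one name, and `save_model` exists precisely to break such ties before writing. A minimal sketch of both behaviors, assuming a toy module with tied weights:

```python
import torch
from safetensors.torch import save_file, save_model

class Tied(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.a = torch.nn.Linear(4, 4, bias=False)
        self.b = torch.nn.Linear(4, 4, bias=False)
        self.b.weight = self.a.weight  # "a.weight" and "b.weight" now alias one storage

model = Tied()

try:
    save_file(model.state_dict(), "tied.safetensors")
except RuntimeError:
    pass  # _flatten detected the shared storage and refused to duplicate it on disk

# save_model drops "b.weight", records the dropped name in the header metadata,
# and load_model reconciles it instead of reporting a missing key.
save_model(model, "tied.safetensors")
```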
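`_tobytes` and `_is_little_endian` in `safetensors/numpy.py` pin the on-disk byte order to little-endian, byteswapping only when the source array is big-endian. A small sketch of the observable effect on a little-endian host:

```python
import numpy as np
from safetensors.numpy import load, save

big = np.arange(4, dtype=">f4")  # explicitly big-endian input
little = big.astype("<f4")       # same values in native little-endian layout

# _tobytes byteswaps the big-endian buffer, so both saves should produce identical bytes
assert save({"x": big}) == save({"x": little})

restored = load(save({"x": big}))["x"]
assert restored.dtype == np.dtype("float32")  # decoded with the native dtype from _TYPES
assert np.array_equal(restored, little)
```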
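Every `load_file` helper above is the same thin loop over `safe_open`, which parses the header eagerly and reads each tensor only when asked, so the handle also supports partial loads. A minimal usage sketch (the filename is illustrative):

```python
from safetensors import safe_open

with safe_open("model.safetensors", framework="np") as f:
    names = list(f.keys())          # header only; no tensor payloads read yet
    first = f.get_tensor(names[0])  # materializes just this one tensor
```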