diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_118_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_118_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a860e07780cc0381e1ed45406018a8f076c1712f
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_118_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ce736f46139e8bb1a4ca551c21a1b16c3d2c0082d406e446b57b621a2f0b66
+size 41830212
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_130_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_130_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f1489812868bd4fd138507623e569f9a3a7f2f15
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_130_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e605a188bb4dbf418021f1328676b909121c48cbe680b92bea9888aa6c355831
+size 41830148
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_151_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_151_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e1335b16eec909e4f19d11827ac33b42b109ee6a
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_151_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7c1f1fd247ecf958bbc383572b994d0e9c9d7c5c9ae9ca9107f60b92a9688cd
+size 41830212
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_206_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_206_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5ed362c2a701c049dfcc17bb3ea83fd5f4332439
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_206_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f2cadbb259049f17448810ebca1523d800cfee80ec14acafcd379fe1e2854cb
+size 41830148
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_226_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_226_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7da2276322e0d106c25b9f8676582ace304ebaef
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_226_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da37dcda622caaa5a9a392c6b00284a8f97a2c244a7be4684f5fabdec35f6df7
+size 41830148
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..69ad617c7bfb5dc62faf9a431d73ce060a9ffcb2
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4cc02a48e597e11d876fc659787e1d604c4544be0332dfac1007b227f914b68
+size 41830192
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_algos.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_algos.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc8a2aeca0e277f9bbe2eb71671c2c46c4b8f998
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_algos.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_analytics.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_analytics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a31aa177f1f52841cd6b21211d3a8a663bae16d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_analytics.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c936267f13fcd781fcc645e1133690801e9eb62
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_api.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7f6e2082e2905b1392ef0995edf47961a480a74
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_indexing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_map.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_map.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..455bb853c9046d9c51604b01f23c33f7eafe9c77
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_map.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_missing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_missing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae6f966057dd2ed296f2106514c7c5d9461700d4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_missing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_operators.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_operators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e94887c14a513b6c47db8f1f4849114ce04988c1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_operators.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_sorting.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_sorting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a64fad6d193aed6b06fbe0f46cbd8b7d86f0cb27
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_sorting.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_take.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_take.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88ea52933c28a654ea311f69fc97ee5e478edfb2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_take.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_warnings.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_warnings.py
new file mode 100644
index 0000000000000000000000000000000000000000..68c59706a6c3bf93908108c337b51c8da187cbb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_warnings.py
@@ -0,0 +1,19 @@
+import pytest
+
+import pandas._testing as tm
+
+
+class TestCategoricalWarnings:
+    def test_tab_complete_warning(self, ip):
+        # https://github.com/pandas-dev/pandas/issues/16409
+        pytest.importorskip("IPython", minversion="6.0.0")
+        from IPython.core.completer import provisionalcompleter
+
+        code = "import pandas as pd; c = pd.Categorical([])"
+        ip.run_cell(code)
+
+        # GH 31324 newer jedi version raises Deprecation warning;
+        # appears resolved 2021-02-02
+        with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
+            with provisionalcompleter("ignore"):
+                list(ip.Completer.completions("c.", 1))
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8261989805bfddf4512df07ba56c935aa87347f8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abed4e460d0afa6a976129f7c72dc2593e830294
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/conftest.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_arithmetic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_arithmetic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79316fc404da4dda37c5692288e95a59fda8f7e1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_arithmetic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_astype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..632e9533cdf8c02a7613f0bed632a8a647e96126
Binary files /dev/null and
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_comparison.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_comparison.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1e01f537ed85f231279315a69126a6a304da9af Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_comparison.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..640c3138d87479d7bf77d9c0e57004adb9b3f0a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_construction.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcbe0563a4c866930a0075ad02c683a36547c19f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_construction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_contains.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..450656b7e5e495b7fbdcfdaa974497e8ed085bc3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_contains.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a20953861557c4ee84230a7e85abae6d1eac6a96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..737aa80db218c7ec149cc234636db3fb0affe5ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_repr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_to_numpy.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_to_numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbe20e193edae46ca4236fc0dd07dbec3cc21ee7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/__pycache__/test_to_numpy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/conftest.py 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..5e971c66029d5ba90ecaa5eb3437246f1548557a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/conftest.py @@ -0,0 +1,48 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +@pytest.fixture(params=[Float32Dtype, Float64Dtype]) +def dtype(request): + """Parametrized fixture returning a float 'dtype'""" + return request.param() + + +@pytest.fixture +def data(dtype): + """Fixture returning 'data' array according to parametrized float 'dtype'""" + return pd.array( + list(np.arange(0.1, 0.9, 0.1)) + + [pd.NA] + + list(np.arange(1, 9.8, 0.1)) + + [pd.NA] + + [9.9, 10.0], + dtype=dtype, + ) + + +@pytest.fixture +def data_missing(dtype): + """ + Fixture returning array with missing data according to parametrized float + 'dtype'. + """ + return pd.array([np.nan, 0.1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' float arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_arithmetic.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..ba081bd01062a1ba59d0b51fdb4d9a1149717a01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_arithmetic.py @@ -0,0 +1,244 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [ + ("add", [1.1, 2.2, None, None, 5.5]), + ("mul", [0.1, 0.4, None, None, 2.5]), + ("sub", [0.9, 1.8, None, None, 4.5]), + ("truediv", [10.0, 10.0, None, None, 10.0]), + ("floordiv", [9.0, 9.0, None, None, 10.0]), + ("mod", [0.1, 0.2, None, None, 0.0]), + ], + ids=["add", "mul", "sub", "div", "floordiv", "mod"], +) +def test_array_op(dtype, opname, exp): + a = pd.array([1.0, 2.0, None, 4.0, 5.0], dtype=dtype) + b = pd.array([0.1, 0.2, 0.3, None, 0.5], dtype=dtype) + + op = getattr(operator, opname) + + result = op(a, b) + expected = pd.array(exp, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(dtype, zero, negative): + # TODO pending NA/NaN discussion + # https://github.com/pandas-dev/pandas/issues/32265/ + a = pd.array([0, 1, -1, None], dtype=dtype) + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, np.nan], dtype=dtype.numpy_dtype), + np.array([False, False, False, True]), + ) + if negative: + expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(dtype): + a = pd.array([-1, 0, 1, None, 2], dtype=dtype) + result = a**0 + expected = pd.array([1, 1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**1 + expected = pd.array([-1, 0, 1, None, 2], 
dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**pd.NA + expected = pd.array([None, None, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**np.nan + # TODO np.nan should be converted to pd.NA / missing before operation? + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype), + mask=a._mask, + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0**a + expected = pd.array([1, 0, None, 0], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = 1**a + expected = pd.array([1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = pd.NA**a + expected = pd.array([1, None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = np.nan**a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(dtype): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None], dtype=dtype) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype) + result = a**b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Float64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_arith_zero_dim_ndarray(other): + arr = pd.array([1, None, 2], dtype="Float64") + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string): + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + if using_infer_string: + import pyarrow as pa + + errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError) + else: + errs = TypeError + + # invalid scalars + msg = "|".join( + [ + r"can only perform ops with numeric values", + r"FloatingArray cannot perform the operation mod", + "unsupported operand type", + "not all arguments converted during string formatting", + "can't multiply sequence by non-int of type 'float'", + "ufunc 'subtract' cannot use operands with types dtype", + r"can only concatenate str \(not \"float\"\) to str", + "ufunc '.*' not supported for the input types, and the inputs could not", + "ufunc '.*' did not contain a loop with signature matching types", + "Concatenation operation is not implemented for NumPy arrays", + "has no kernel", + "not implemented", + ] + ) + with pytest.raises(errs, match=msg): + ops("foo") + with pytest.raises(errs, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + with pytest.raises(errs, match=msg): + ops(pd.Series("foo", index=s.index)) + + msg = "|".join( + [ + "can only perform ops with numeric values", + "cannot perform .* with this index type: DatetimeArray", + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. 
*", + "unsupported operand type", + "not all arguments converted during string formatting", + "can't multiply sequence by non-int of type 'float'", + "ufunc 'subtract' cannot use operands with types dtype", + ( + "ufunc 'add' cannot use operands with types " + rf"dtype\('{tm.ENDIAN}M8\[ns\]'\)" + ), + r"ufunc 'add' cannot use operands with types dtype\('float\d{2}'\)", + "cannot subtract DatetimeArray from ndarray", + "has no kernel", + "not implemented", + ] + ) + with pytest.raises(errs, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +def test_cross_type_arithmetic(): + df = pd.DataFrame( + { + "A": pd.array([1, 2, np.nan], dtype="Float64"), + "B": pd.array([1, np.nan, 3], dtype="Float32"), + "C": np.array([1, 2, 3], dtype="float64"), + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1.1, 2.2, 3.3], [-1.1, -2.2, -3.3], [1.1, 2.2, 3.3]), + ([1.1, 2.2, None], [-1.1, -2.2, None], [1.1, 2.2, None]), + ([-1.1, 0.0, 1.1], [1.1, 0.0, -1.1], [1.1, 0.0, 1.1]), + ], +) +def test_unary_float_operators(float_ea_dtype, source, neg_target, abs_target): + # GH38794 + dtype = float_ea_dtype + arr = pd.array(source, dtype=dtype) + neg_result, pos_result, abs_result = -arr, +arr, abs(arr) + neg_target = pd.array(neg_target, dtype=dtype) + abs_target = pd.array(abs_target, dtype=dtype) + + tm.assert_extension_array_equal(neg_result, neg_target) + tm.assert_extension_array_equal(pos_result, arr) + assert not tm.shares_memory(pos_result, arr) + tm.assert_extension_array_equal(abs_result, abs_target) + + +def test_bitwise(dtype): + left = pd.array([1, None, 3, 4], dtype=dtype) + right = pd.array([None, 3, 5, 4], dtype=dtype) + + with pytest.raises(TypeError, match="unsupported operand type"): + left | right + with pytest.raises(TypeError, match="unsupported operand type"): + left & right + with pytest.raises(TypeError, match="unsupported operand type"): + left ^ right diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..ade3dbd2c99da32bffa9091bd4c3c2b52f7bd5de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_astype.py @@ -0,0 +1,128 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_astype(): + # with missing values + arr = pd.array([0.1, 0.2, None], dtype="Float64") + + with pytest.raises(ValueError, match="cannot convert NA to integer"): + arr.astype("int64") + + with pytest.raises(ValueError, match="cannot convert float NaN to bool"): + arr.astype("bool") + + result = arr.astype("float64") + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # no missing values + arr = pd.array([0.0, 1.0, 0.5], dtype="Float64") + result = arr.astype("int64") + expected = np.array([0, 1, 0], dtype="int64") + tm.assert_numpy_array_equal(result, 
expected) + + result = arr.astype("bool") + expected = np.array([False, True, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_to_floating_array(): + # astype to FloatingArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("Float64") + tm.assert_extension_array_equal(result, arr) + result = arr.astype(pd.Float64Dtype()) + tm.assert_extension_array_equal(result, arr) + result = arr.astype("Float32") + expected = pd.array([0.0, 1.0, None], dtype="Float32") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_boolean_array(): + # astype to BooleanArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("boolean") + expected = pd.array([False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = arr.astype(pd.BooleanDtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_integer_array(): + # astype to IntegerArray + arr = pd.array([0.0, 1.5, None], dtype="Float64") + + result = arr.astype("Int64") + expected = pd.array([0, 1, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_str(): + a = pd.array([0.1, 0.2, None], dtype="Float64") + expected = np.array(["0.1", "0.2", ""], dtype="U32") + + tm.assert_numpy_array_equal(a.astype(str), expected) + tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_copy(): + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Float64", copy=True) + assert result is not arr + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Float64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + result = arr.astype("Float32", copy=False) + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_object(dtype): + arr = pd.array([1.0, pd.NA], dtype=dtype) + + result = arr.astype(object) + expected = np.array([1.0, pd.NA], dtype=object) + tm.assert_numpy_array_equal(result, expected) + # check exact element types + assert isinstance(result[0], float) + assert result[1] is pd.NA + + +def test_Float64_conversion(): + # GH#40729 + testseries = pd.Series(["1", "2", "3", "4"], dtype="object") + result = testseries.astype(pd.Float64Dtype()) + + expected = pd.Series([1.0, 2.0, 3.0, 4.0], dtype=pd.Float64Dtype()) + + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_comparison.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..a429649f1ce1dc10fc9610faa73a81dd94255b37 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_comparison.py @@ -0,0 +1,65 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) + + +class TestComparisonOps(NumericOps, ComparisonOps): + @pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_compare_with_integerarray(self, comparison_op): + op = comparison_op + a = pd.array([0, 1, None] * 3, dtype="Int64") + b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64") + other = b.astype("Int64") + expected = op(a, other) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + expected = op(other, a) + result = op(b, a) + tm.assert_extension_array_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Float64") + a2 = pd.array([1, 2, None], dtype="Float32") + assert a1.equals(a2) is False + + +def test_equals_nan_vs_na(): + # GH#44382 + + mask = np.zeros(3, dtype=bool) + data = np.array([1.0, np.nan, 3.0], dtype=np.float64) + + left = FloatingArray(data, mask) + assert left.equals(left) + tm.assert_extension_array_equal(left, left) + + assert left.equals(left.copy()) + assert left.equals(FloatingArray(data.copy(), mask.copy())) + + mask2 = np.array([False, True, False], dtype=bool) + data2 = np.array([1.0, 2.0, 3.0], dtype=np.float64) + right = FloatingArray(data2, mask2) + assert right.equals(right) + tm.assert_extension_array_equal(right, right) + + assert not left.equals(right) + + # with mask[1] = True, the only difference is data[1], which should + # not matter for equals + mask[1] = True + assert left.equals(right) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_concat.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..2174a834aa959b88d899971f83247258a94476e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_concat.py @@ -0,0 +1,20 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Float64", "Float64"], "Float64"), + (["Float32", "Float64"], "Float64"), + (["Float32", "Float32"], "Float32"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_contains.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..956642697bf3285e5c661c43047a5f0dafa83144 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_contains.py @@ -0,0 +1,12 @@ +import numpy as np + +import pandas as pd + + +def test_contains_nan(): + # GH#52840 + arr = pd.array(range(5)) / 0 + + assert 
np.isnan(arr._data[0]) + assert not arr.isna()[0] + assert np.nan in arr diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_function.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_function.py new file mode 100644 index 0000000000000000000000000000000000000000..40fd66fd049a621138c2cda074a08a1a94967bb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_function.py @@ -0,0 +1,194 @@ +import numpy as np +import pytest + +from pandas.compat import IS64 + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, +@pytest.mark.filterwarnings("ignore:invalid value encountered in sign:RuntimeWarning") +def test_ufuncs_single(ufunc): + a = pd.array([1, 2, -3, np.nan], dtype="Float64") + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64") + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(ufunc(s.astype(float)), dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_float(ufunc): + # two FloatingArrays + a = pd.array([1, 0.2, -3, np.nan], dtype="Float64") + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values, dtype="Float64") + + res = np.add.reduce(arr) + expected = arr.sum(skipna=False) + tm.assert_almost_equal(res, expected) + + +@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system") +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("std", {"ddof": 0}), + ("std", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) 
+ assert expected == result + + +def test_value_counts_na(): + arr = pd.array([0.1, 0.2, 0.1, pd.NA], dtype="Float64") + result = arr.value_counts(dropna=False) + idx = pd.Index([0.1, 0.2, pd.NA], dtype=arr.dtype) + assert idx.dtype == arr.dtype + expected = pd.Series([2, 1, 1], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=idx[:-1], dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + ser = pd.Series([], dtype="Float64") + result = ser.value_counts() + idx = pd.Index([], dtype="Float64") + assert idx.dtype == "Float64" + expected = pd.Series([], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + ser = pd.Series([0.1, 0.2, 0.1, pd.NA], dtype="Float64") + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3 + assert expected.index.dtype == ser.dtype + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_floating_array_sum(skipna, min_count, dtype): + arr = pd.array([1, 2, 3, None], dtype=dtype) + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6.0 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6.0), ([1, 2, 3, None], 6.0), ([None], 0.0)] +) +def test_floating_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Float64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([0.1, None, 3.0], dtype="Float64"), + } + ) + + # op + result = getattr(df.C, op)() + assert isinstance(result, np.float64) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([0.1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_floating_array_min_max(skipna, method, dtype): + arr = pd.array([0.0, 1.0, None], dtype=dtype) + func = getattr(arr, method) + result = func(skipna=skipna) + if skipna: + assert result == (0 if method == "min" else 1) + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 9]) +def test_floating_array_prod(skipna, min_count, dtype): + arr = pd.array([1.0, 2.0, None], dtype=dtype) + result = arr.prod(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 2 + else: + assert result is pd.NA diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_repr.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..ea2cdd4fab86ada36d6d5804204c4a479a3e1603 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_repr.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +def 
test_dtypes(dtype): + # smoke tests on auto dtype construction + + np.dtype(dtype.type).kind == "f" + assert dtype.name is not None + + +@pytest.mark.parametrize( + "dtype, expected", + [(Float32Dtype(), "Float32Dtype()"), (Float64Dtype(), "Float64Dtype()")], +) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + +def test_repr_array(): + result = repr(pd.array([1.0, None, 3.0])) + expected = "\n[1.0, , 3.0]\nLength: 3, dtype: Float64" + assert result == expected + + +def test_repr_array_long(): + data = pd.array([1.0, 2.0, None] * 1000) + expected = """ +[ 1.0, 2.0, , 1.0, 2.0, , 1.0, 2.0, , 1.0, + ... + , 1.0, 2.0, , 1.0, 2.0, , 1.0, 2.0, ] +Length: 3000, dtype: Float64""" + result = repr(data) + assert result == expected + + +def test_frame_repr(data_missing): + df = pd.DataFrame({"A": data_missing}) + result = repr(df) + expected = " A\n0 \n1 0.1" + assert result == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_to_numpy.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_to_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..e954cecba417afd71059a35f7506c650eb780373 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_to_numpy.py @@ -0,0 +1,132 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy(box): + con = pd.Series if box else pd.array + + # default (with or without missing values) -> object dtype + arr = con([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy() + expected = np.array([0.1, 0.2, 0.3], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + arr = con([0.1, 0.2, None], dtype="Float64") + result = arr.to_numpy() + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_float(box): + con = pd.Series if box else pd.array + + # no missing values -> can convert to float, otherwise raises + arr = con([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64") + expected = np.array([0.1, 0.2, 0.3], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + arr = con([0.1, 0.2, None], dtype="Float64") + result = arr.to_numpy(dtype="float64") + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_int(box): + con = pd.Series if box else pd.array + + # no missing values -> can convert to int, otherwise raises + arr = con([1.0, 2.0, 3.0], dtype="Float64") + result = arr.to_numpy(dtype="int64") + expected = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + arr = con([1.0, 2.0, None], dtype="Float64") + with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"): + result = arr.to_numpy(dtype="int64") + + # automatic casting (floors the values) + arr = con([0.1, 0.9, 1.1], dtype="Float64") + result = arr.to_numpy(dtype="int64") + expected = np.array([0, 0, 1], dtype="int64") + tm.assert_numpy_array_equal(result, 
expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_na_value(box): + con = pd.Series if box else pd.array + + arr = con([0.0, 1.0, None], dtype="Float64") + result = arr.to_numpy(dtype=object, na_value=None) + expected = np.array([0.0, 1.0, None], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype=bool, na_value=False) + expected = np.array([False, True, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="int64", na_value=-99) + expected = np.array([0, 1, -99], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_na_value_with_nan(): + # array with both NaN and NA -> only fill NA with `na_value` + arr = FloatingArray(np.array([0.0, np.nan, 0.0]), np.array([False, False, True])) + result = arr.to_numpy(dtype="float64", na_value=-1) + expected = np.array([0.0, np.nan, -1.0], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"]) +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_dtype(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0], dtype="Float64") + + result = arr.to_numpy(dtype=dtype) + expected = np.array([0, 1], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"]) +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_na_raises(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0, None], dtype="Float64") + with pytest.raises(ValueError, match=dtype): + arr.to_numpy(dtype=dtype) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_string(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0, None], dtype="Float64") + + result = arr.to_numpy(dtype="str") + expected = np.array([0.0, 1.0, pd.NA], dtype=f"{tm.ENDIAN}U32") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_copy(): + # to_numpy can be zero-copy if no missing values + arr = pd.array([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64") + result[0] = 10 + tm.assert_extension_array_equal(arr, pd.array([10, 0.2, 0.3], dtype="Float64")) + + arr = pd.array([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64", copy=True) + result[0] = 10 + tm.assert_extension_array_equal(arr, pd.array([0.1, 0.2, 0.3], dtype="Float64")) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d012daea0f71f008b79fce795202957358c1cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_accessor.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_accessor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dcd5d639e56e5fec78027c260ec3f182e9c1cc5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_accessor.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_arithmetics.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_arithmetics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc0d0b83e7aecdebb187e5238aba5046329d2225
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_arithmetics.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_array.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_array.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7d95cb0d44dfcceea378fd888a2cd61e5c1c592
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_array.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0799a1f219729da3a1a5b6ce5d514dc50615071b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_dtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_dtype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc2e617ac4e1e9416f3cd72e21511f6e03595aa8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_dtype.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91934a9f2a9a4094b6f0e47aa304af2f577f38f8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_indexing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbae1fd5b5cd58b73ff18d9ec23d53ab2f9ec7d7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..87eb7bcfa9cee3e92386ad0f148b896c0e682b07
--- /dev/null
+++
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py @@ -0,0 +1,253 @@ +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestSeriesAccessor: + def test_to_dense(self): + ser = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]") + result = ser.sparse.to_dense() + expected = pd.Series([0, 1, 0, 10]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"]) + def test_get_attributes(self, attr): + arr = SparseArray([0, 1]) + ser = pd.Series(arr) + + result = getattr(ser.sparse, attr) + expected = getattr(arr, attr) + assert result == expected + + def test_from_coo(self): + scipy_sparse = pytest.importorskip("scipy.sparse") + + row = [0, 3, 1, 0] + col = [0, 3, 1, 2] + data = [4, 5, 7, 9] + + sp_array = scipy_sparse.coo_matrix((data, (row, col))) + result = pd.Series.sparse.from_coo(sp_array) + + index = pd.MultiIndex.from_arrays( + [ + np.array([0, 0, 1, 3], dtype=np.int32), + np.array([0, 2, 1, 3], dtype=np.int32), + ], + ) + expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "sort_labels, expected_rows, expected_cols, expected_values_pos", + [ + ( + False, + [("b", 2), ("a", 2), ("b", 1), ("a", 1)], + [("z", 1), ("z", 2), ("x", 2), ("z", 0)], + {1: (1, 0), 3: (3, 3)}, + ), + ( + True, + [("a", 1), ("a", 2), ("b", 1), ("b", 2)], + [("x", 2), ("z", 0), ("z", 1), ("z", 2)], + {1: (1, 2), 3: (0, 1)}, + ), + ], + ) + def test_to_coo( + self, sort_labels, expected_rows, expected_cols, expected_values_pos + ): + sp_sparse = pytest.importorskip("scipy.sparse") + + values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0) + index = pd.MultiIndex.from_tuples( + [ + ("b", 2, "z", 1), + ("a", 2, "z", 2), + ("a", 2, "z", 1), + ("a", 2, "x", 2), + ("b", 1, "z", 1), + ("a", 1, "z", 0), + ] + ) + ss = pd.Series(values, index=index) + + expected_A = np.zeros((4, 4)) + for value, (row, col) in expected_values_pos.items(): + expected_A[row, col] = value + + A, rows, cols = ss.sparse.to_coo( + row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels + ) + assert isinstance(A, sp_sparse.coo_matrix) + tm.assert_numpy_array_equal(A.toarray(), expected_A) + assert rows == expected_rows + assert cols == expected_cols + + def test_non_sparse_raises(self): + ser = pd.Series([1, 2, 3]) + with pytest.raises(AttributeError, match=".sparse"): + ser.sparse.density + + +class TestFrameAccessor: + def test_accessor_raises(self): + df = pd.DataFrame({"A": [0, 1]}) + with pytest.raises(AttributeError, match="sparse"): + df.sparse + + @pytest.mark.parametrize("format", ["csc", "csr", "coo"]) + @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])]) + @pytest.mark.parametrize("dtype", ["float64", "int64"]) + def test_from_spmatrix(self, format, labels, dtype): + sp_sparse = pytest.importorskip("scipy.sparse") + + sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item()) + + mat = sp_sparse.eye(10, format=format, dtype=dtype) + result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels) + expected = pd.DataFrame( + np.eye(10, dtype=dtype), index=labels, columns=labels + ).astype(sp_dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("format", ["csc", "csr", "coo"]) + def 
test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 2, density=0.5, format=format) + mat.data[0] = 0 + result = pd.DataFrame.sparse.from_spmatrix(mat) + dtype = SparseDtype("float64", 0.0) + expected = pd.DataFrame(mat.todense()).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", + [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]], + ) + def test_from_spmatrix_columns(self, columns): + sp_sparse = pytest.importorskip("scipy.sparse") + + dtype = SparseDtype("float64", 0.0) + + mat = sp_sparse.random(10, 2, density=0.5) + result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns) + expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)] + ) + def test_to_coo(self, colnames): + sp_sparse = pytest.importorskip("scipy.sparse") + + df = pd.DataFrame( + {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]" + ) + result = df.sparse.to_coo() + expected = sp_sparse.coo_matrix(np.asarray(df)) + assert (result != expected).nnz == 0 + + @pytest.mark.parametrize("fill_value", [1, np.nan]) + def test_to_coo_nonzero_fill_val_raises(self, fill_value): + pytest.importorskip("scipy") + df = pd.DataFrame( + { + "A": SparseArray( + [fill_value, fill_value, fill_value, 2], fill_value=fill_value + ), + "B": SparseArray( + [fill_value, 2, fill_value, fill_value], fill_value=fill_value + ), + } + ) + with pytest.raises(ValueError, match="fill value must be 0"): + df.sparse.to_coo() + + def test_to_coo_midx_categorical(self): + # GH#50996 + sp_sparse = pytest.importorskip("scipy.sparse") + + midx = pd.MultiIndex.from_arrays( + [ + pd.CategoricalIndex(list("ab"), name="x"), + pd.CategoricalIndex([0, 1], name="y"), + ] + ) + + ser = pd.Series(1, index=midx, dtype="Sparse[int]") + result = ser.sparse.to_coo(row_levels=["x"], column_levels=["y"])[0] + expected = sp_sparse.coo_matrix( + (np.array([1, 1]), (np.array([0, 1]), np.array([0, 1]))), shape=(2, 2) + ) + assert (result != expected).nnz == 0 + + def test_to_dense(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0], dtype=SparseDtype("int64", 0)), + "B": SparseArray([1, 0], dtype=SparseDtype("int64", 1)), + "C": SparseArray([1.0, 0.0], dtype=SparseDtype("float64", 0.0)), + }, + index=["b", "a"], + ) + result = df.sparse.to_dense() + expected = pd.DataFrame( + {"A": [1, 0], "B": [1, 0], "C": [1.0, 0.0]}, index=["b", "a"] + ) + tm.assert_frame_equal(result, expected) + + def test_density(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0, 2, 1], fill_value=0), + "B": SparseArray([0, 1, 1, 1], fill_value=0), + } + ) + res = df.sparse.density + expected = 0.75 + assert res == expected + + @pytest.mark.parametrize("dtype", ["int64", "float64"]) + @pytest.mark.parametrize("dense_index", [True, False]) + def test_series_from_coo(self, dtype, dense_index): + sp_sparse = pytest.importorskip("scipy.sparse") + + A = sp_sparse.eye(3, format="coo", dtype=dtype) + result = pd.Series.sparse.from_coo(A, dense_index=dense_index) + + index = pd.MultiIndex.from_tuples( + [ + np.array([0, 0], dtype=np.int32), + np.array([1, 1], dtype=np.int32), + np.array([2, 2], dtype=np.int32), + ], + ) + expected = pd.Series(SparseArray(np.array([1, 1, 1], dtype=dtype)), index=index) + if dense_index: + expected = 
expected.reindex(pd.MultiIndex.from_product(index.levels)) + + tm.assert_series_equal(result, expected) + + def test_series_from_coo_incorrect_format_raises(self): + # gh-26554 + sp_sparse = pytest.importorskip("scipy.sparse") + + m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]])) + with pytest.raises( + TypeError, match="Expected coo_matrix. Got csr_matrix instead." + ): + pd.Series.sparse.from_coo(m) + + def test_with_column_named_sparse(self): + # https://github.com/pandas-dev/pandas/issues/30758 + df = pd.DataFrame({"sparse": pd.arrays.SparseArray([1, 2])}) + assert isinstance(df.sparse, pd.core.arrays.sparse.accessor.SparseFrameAccessor) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py new file mode 100644 index 0000000000000000000000000000000000000000..ffc93b4e4f176385ac7b2b8a0b51027cb0bad9f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py @@ -0,0 +1,514 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture(params=["integer", "block"]) +def kind(request): + """kind kwarg to pass to SparseArray""" + return request.param + + +@pytest.fixture(params=[True, False]) +def mix(request): + """ + Fixture returning True or False, determining whether to operate + op(sparse, dense) instead of op(sparse, sparse) + """ + return request.param + + +class TestSparseArrayArithmetics: + def _assert(self, a, b): + # We have to use tm.assert_sp_array_equal. See GH #45126 + tm.assert_numpy_array_equal(a, b) + + def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op): + # Check that arithmetic behavior matches non-Sparse Series arithmetic + + if isinstance(a_dense, np.ndarray): + expected = op(pd.Series(a_dense), b_dense).values + elif isinstance(b_dense, np.ndarray): + expected = op(a_dense, pd.Series(b_dense)).values + else: + raise NotImplementedError + + with np.errstate(invalid="ignore", divide="ignore"): + if mix: + result = op(a, b_dense).to_dense() + else: + result = op(a, b).to_dense() + + self._assert(result, expected) + + def _check_bool_result(self, res): + assert isinstance(res, SparseArray) + assert isinstance(res.dtype, SparseDtype) + assert res.dtype.subtype == np.bool_ + assert isinstance(res.fill_value, bool) + + def _check_comparison_ops(self, a, b, a_dense, b_dense): + with np.errstate(invalid="ignore"): + # Unfortunately, trying to wrap the computation of each expected + # value is with np.errstate() is too tedious. 
+ # + # sparse & sparse + self._check_bool_result(a == b) + self._assert((a == b).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b) + self._assert((a != b).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b) + self._assert((a >= b).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b) + self._assert((a <= b).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b) + self._assert((a > b).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b) + self._assert((a < b).to_dense(), a_dense < b_dense) + + # sparse & dense + self._check_bool_result(a == b_dense) + self._assert((a == b_dense).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b_dense) + self._assert((a != b_dense).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b_dense) + self._assert((a >= b_dense).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b_dense) + self._assert((a <= b_dense).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b_dense) + self._assert((a > b_dense).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b_dense) + self._assert((a < b_dense).to_dense(), a_dense < b_dense) + + def _check_logical_ops(self, a, b, a_dense, b_dense): + # sparse & sparse + self._check_bool_result(a & b) + self._assert((a & b).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b) + self._assert((a | b).to_dense(), a_dense | b_dense) + # sparse & dense + self._check_bool_result(a & b_dense) + self._assert((a & b_dense).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b_dense) + self._assert((a | b_dense).to_dense(), a_dense | b_dense) + + @pytest.mark.parametrize("scalar", [0, 1, 3]) + @pytest.mark.parametrize("fill_value", [None, 0, 2]) + def test_float_scalar( + self, kind, mix, all_arithmetic_functions, fill_value, scalar, request + ): + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + a = SparseArray(values, kind=kind, fill_value=fill_value) + self._check_numeric_ops(a, scalar, values, scalar, mix, op) + + def test_float_scalar_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + + a = SparseArray(values, kind=kind) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=0) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=2) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + def test_float_same_index_without_nans(self, kind, mix, all_arithmetic_functions): + # when sp_index are the same + op = all_arithmetic_functions + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_with_nans( + self, kind, mix, all_arithmetic_functions, request + ): + # when sp_index are the same + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = 
SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_comparison(self, kind): + # when sp_index are the same + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + def test_float_array(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_different_kind(self, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind="integer") + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block", fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=1) + b = SparseArray(rvalues, kind="block", fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_int_array(self, kind, mix, all_arithmetic_functions): + op = 
all_arithmetic_functions + + # have to specify dtype explicitly until fixing GH 667 + dtype = np.int64 + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, fill_value=0, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=1, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype, fill_value=1) + b = SparseArray(rvalues, fill_value=2, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_int_array_comparison(self, kind): + dtype = "int64" + # int32 NI ATM + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=1) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_same_index(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, True, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_array_logical(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, False, True, False, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request): + op = all_arithmetic_functions + rdtype = "int64" + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + 
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_mixed_array_comparison(self, kind): + rdtype = "int64" + # int32 NI ATM + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_xor(self): + s = SparseArray([True, True, False, False]) + t = SparseArray([True, False, True, False]) + result = s ^ t + sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32")) + expected = SparseArray([False, True, True], sparse_index=sp_index) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [operator.eq, operator.add]) +def test_with_list(op): + arr = SparseArray([0, 1], fill_value=0) + result = op(arr, [0, 1]) + expected = op(arr, SparseArray([0, 1])) + tm.assert_sp_array_equal(result, expected) + + +def test_with_dataframe(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + df = pd.DataFrame([[1, 2], [3, 4]]) + result = arr.__add__(df) + assert result is NotImplemented + + +def test_with_zerodim_ndarray(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + + result = arr * np.array(2) + expected = arr * 2 + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.abs, np.exp]) +@pytest.mark.parametrize( + "arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])] +) +def test_ufuncs(ufunc, arr): + result = ufunc(arr) + fill_value = ufunc(arr.fill_value) + expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + (SparseArray([0, 0, 0]), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 
2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + ], +) +@pytest.mark.parametrize("ufunc", [np.add, np.greater]) +def test_binary_ufuncs(ufunc, a, b): + # can't say anything about fill value here. + result = ufunc(a, b) + expected = ufunc(np.asarray(a), np.asarray(b)) + assert isinstance(result, SparseArray) + tm.assert_numpy_array_equal(np.asarray(result), expected) + + +def test_ndarray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + ndarray += sparray + expected = np.array([0, 3, 2, 3]) + tm.assert_numpy_array_equal(ndarray, expected) + + +def test_sparray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + sparray += ndarray + expected = SparseArray([0, 3, 2, 3], fill_value=0) + tm.assert_sp_array_equal(sparray, expected) + + +@pytest.mark.parametrize("cons", [list, np.array, SparseArray]) +def test_mismatched_length_cmp_op(cons): + left = SparseArray([True, True]) + right = cons([True, True, True]) + with pytest.raises(ValueError, match="operands have mismatched length"): + left & right + + +@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"]) +@pytest.mark.parametrize("fill_value", [np.nan, 3]) +def test_binary_operators(op, fill_value): + op = getattr(operator, op) + data1 = np.random.default_rng(2).standard_normal(20) + data2 = np.random.default_rng(2).standard_normal(20) + + data1[::2] = fill_value + data2[::3] = fill_value + + first = SparseArray(data1, fill_value=fill_value) + second = SparseArray(data2, fill_value=fill_value) + + with np.errstate(all="ignore"): + res = op(first, second) + exp = SparseArray( + op(first.to_dense(), second.to_dense()), fill_value=first.fill_value + ) + assert isinstance(res, SparseArray) + tm.assert_almost_equal(res.to_dense(), exp.to_dense()) + + res2 = op(first, second.to_dense()) + assert isinstance(res2, SparseArray) + tm.assert_sp_array_equal(res, res2) + + res3 = op(first.to_dense(), second) + assert isinstance(res3, SparseArray) + tm.assert_sp_array_equal(res, res3) + + res4 = op(first, 4) + assert isinstance(res4, SparseArray) + + # Ignore this if the actual op raises (e.g. pow). 
+ try: + exp = op(first.to_dense(), 4) + exp_fv = op(first.fill_value, 4) + except ValueError: + pass + else: + tm.assert_almost_equal(res4.fill_value, exp_fv) + tm.assert_almost_equal(res4.to_dense(), exp) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..883d6ea3959ff6c12659c55762b889be18349ef7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py @@ -0,0 +1,480 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + """Fixture returning numpy array with valid and missing entries""" + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + """Fixture returning SparseArray from 'arr_data'""" + return SparseArray(arr_data) + + +@pytest.fixture +def zarr(): + """Fixture returning SparseArray with integer entries and 'fill_value=0'""" + return SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + +class TestSparseArray: + @pytest.mark.parametrize("fill_value", [0, None, np.nan]) + def test_shift_fill_value(self, fill_value): + # GH #24128 + sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0) + res = sparse.shift(1, fill_value=fill_value) + if isna(fill_value): + fill_value = res.dtype.na_value + exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0) + tm.assert_sp_array_equal(res, exp) + + def test_set_fill_value(self): + arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan) + arr.fill_value = 2 + assert arr.fill_value == 2 + + arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64) + arr.fill_value = 2 + assert arr.fill_value == 2 + + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 3.1 + assert arr.fill_value == 3.1 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + arr.fill_value = True + assert arr.fill_value is True + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 0 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + @pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]) + def test_set_fill_invalid_non_scalar(self, val): + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + msg = "fill_value must be a scalar" + + with pytest.raises(ValueError, match=msg): + arr.fill_value = val + + def test_copy(self, arr): + arr2 = arr.copy() + assert arr2.sp_values is not arr.sp_values + assert arr2.sp_index is arr.sp_index + + def test_values_asarray(self, arr_data, arr): + tm.assert_almost_equal(arr.to_dense(), arr_data) + + @pytest.mark.parametrize( + "data,shape,dtype", + [ + ([0, 0, 0, 0, 0], (5,), None), + ([], (0,), None), + ([0], (1,), None), + (["A", "A", np.nan, "B"], (4,), object), + ], + ) + def test_shape(self, data, shape, dtype): + # GH 21126 + out = SparseArray(data, dtype=dtype) + assert out.shape == shape + + @pytest.mark.parametrize( + "vals", + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + [1, np.nan, np.nan, 3, np.nan], + [1, np.nan, 0, 3, 0], + ], + ) + 
@pytest.mark.parametrize("fill_value", [None, 0]) + def test_dense_repr(self, vals, fill_value): + vals = np.array(vals) + arr = SparseArray(vals, fill_value=fill_value) + + res = arr.to_dense() + tm.assert_numpy_array_equal(res, vals) + + @pytest.mark.parametrize("fix", ["arr", "zarr"]) + def test_pickle(self, fix, request): + obj = request.getfixturevalue(fix) + unpickled = tm.round_trip_pickle(obj) + tm.assert_sp_array_equal(unpickled, obj) + + def test_generator_warnings(self): + sp_arr = SparseArray([1, 2, 3]) + with tm.assert_produces_warning(None): + for _ in sp_arr: + pass + + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) + + def test_fillna(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0]) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan]) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + # float dtype's fill_value is np.nan, replaced by -1 + s = SparseArray([0.0, 0.0, 0.0, 0.0]) + res = s.fillna(-1) + exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + # int dtype shouldn't have missing. No changes. + s = SparseArray([0, 0, 0, 0]) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + tm.assert_sp_array_equal(res, s) + + s = SparseArray([0, 0, 0, 0], fill_value=0) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + # fill_value can be nan if there is no missing hole. + # only fill_value will be changed + s = SparseArray([0, 0, 0, 0], fill_value=np.nan) + assert s.dtype == SparseDtype(np.int64, fill_value=np.nan) + assert np.isnan(s.fill_value) + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + def test_fillna_overlap(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + # filling with existing value doesn't replace existing value with + # fill_value, i.e. 
existing 3 remains in sp_values + res = s.fillna(3) + exp = np.array([1, 3, 3, 3, 3], dtype=np.float64) + tm.assert_numpy_array_equal(res.to_dense(), exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(3) + exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + def test_nonzero(self): + # Tests regression #21172. + sa = SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + expected = np.array([2, 5, 9], dtype=np.int32) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + sa = SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + +class TestSparseArrayAnalytics: + @pytest.mark.parametrize( + "data,expected", + [ + ( + np.array([1, 2, 3, 4, 5], dtype=float), # non-null data + SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])), + ), + ( + np.array([1, 2, np.nan, 4, 5], dtype=float), # null data + SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])), + ), + ], + ) + @pytest.mark.parametrize("numpy", [True, False]) + def test_cumsum(self, data, expected, numpy): + cumsum = np.cumsum if numpy else lambda s: s.cumsum() + + out = cumsum(SparseArray(data)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=np.nan)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=2)) + tm.assert_sp_array_equal(out, expected) + + if numpy: # numpy compatibility checks. + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), out=out) + else: + axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid. 
+ msg = re.escape(f"axis(={axis}) out of bounds") + with pytest.raises(ValueError, match=msg): + SparseArray(data).cumsum(axis=axis) + + def test_ufunc(self): + # GH 13853 make sure ufunc is applied to fill_value + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([1, np.nan, 2, np.nan, 2]) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=-1) + exp = SparseArray([1, 1, 2, 2], fill_value=1) + tm.assert_sp_array_equal(abs(sparse), exp) + tm.assert_sp_array_equal(np.abs(sparse), exp) + + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2])) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + def test_ufunc_args(self): + # GH 13853 make sure ufunc is applied to fill_value, including its arg + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([2, np.nan, 3, np.nan, -1]) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([2, 0, 3, -1], fill_value=2) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray([2, 0, 1, -1], fill_value=1) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + @pytest.mark.parametrize("fill_value", [0.0, np.nan]) + def test_modf(self, fill_value): + # https://github.com/pandas-dev/pandas/issues/26946 + sparse = SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value) + r1, r2 = np.modf(sparse) + e1, e2 = np.modf(np.asarray(sparse)) + tm.assert_sp_array_equal(r1, SparseArray(e1, fill_value=fill_value)) + tm.assert_sp_array_equal(r2, SparseArray(e2, fill_value=fill_value)) + + def test_nbytes_integer(self): + arr = SparseArray([1, 0, 0, 0, 2], kind="integer") + result = arr.nbytes + # (2 * 8) + 2 * 4 + assert result == 24 + + def test_nbytes_block(self): + arr = SparseArray([1, 2, 0, 0, 0], kind="block") + result = arr.nbytes + # (2 * 8) + 4 + 4 + # sp_values, blocs, blengths + assert result == 24 + + def test_asarray_datetime64(self): + s = SparseArray(pd.to_datetime(["2012", None, None, "2013"])) + np.asarray(s) + + def test_density(self): + arr = SparseArray([0, 1]) + assert arr.density == 0.5 + + def test_npoints(self): + arr = SparseArray([0, 1]) + assert arr.npoints == 1 + + +def test_setting_fill_value_fillna_still_works(): + # This is why letting users update fill_value / dtype is bad + # astype has the same problem. + arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0) + arr.fill_value = np.nan + result = arr.isna() + # Can't do direct comparison, since the sp_index will be different + # So let's convert to ndarray and check there. 
+ result = np.asarray(result) + + expected = np.array([False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_setting_fill_value_updates(): + arr = SparseArray([0.0, np.nan], fill_value=0) + arr.fill_value = np.nan + # use private constructor to get the index right + # otherwise both nans would be un-stored. + expected = SparseArray._simple_new( + sparse_array=np.array([np.nan]), + sparse_index=IntIndex(2, [1]), + dtype=SparseDtype(float, np.nan), + ) + tm.assert_sp_array_equal(arr, expected) + + +@pytest.mark.parametrize( + "arr,fill_value,loc", + [ + ([None, 1, 2], None, 0), + ([0, None, 2], None, 1), + ([0, 1, None], None, 2), + ([0, 1, 1, None, None], None, 3), + ([1, 1, 1, 2], None, -1), + ([], None, -1), + ([None, 1, 0, 0, None, 2], None, 0), + ([None, 1, 0, 0, None, 2], 1, 1), + ([None, 1, 0, 0, None, 2], 2, 5), + ([None, 1, 0, 0, None, 2], 3, -1), + ([None, 0, 0, 1, 2, 1], 0, 1), + ([None, 0, 0, 1, 2, 1], 1, 3), + ], +) +def test_first_fill_value_loc(arr, fill_value, loc): + result = SparseArray(arr, fill_value=fill_value)._first_fill_value_loc() + assert result == loc + + +@pytest.mark.parametrize( + "arr", + [ + [1, 2, np.nan, np.nan], + [1, np.nan, 2, np.nan], + [1, 2, np.nan], + [np.nan, 1, 0, 0, np.nan, 2], + [np.nan, 0, 0, 1, 2, 1], + ], +) +@pytest.mark.parametrize("fill_value", [np.nan, 0, 1]) +def test_unique_na_fill(arr, fill_value): + a = SparseArray(arr, fill_value=fill_value).unique() + b = pd.Series(arr).unique() + assert isinstance(a, SparseArray) + a = np.asarray(a) + tm.assert_numpy_array_equal(a, b) + + +def test_unique_all_sparse(): + # https://github.com/pandas-dev/pandas/issues/23168 + arr = SparseArray([0, 0]) + result = arr.unique() + expected = SparseArray([0]) + tm.assert_sp_array_equal(result, expected) + + +def test_map(): + arr = SparseArray([0, 1, 2]) + expected = SparseArray([10, 11, 12], fill_value=10) + + # dict + result = arr.map({0: 10, 1: 11, 2: 12}) + tm.assert_sp_array_equal(result, expected) + + # series + result = arr.map(pd.Series({0: 10, 1: 11, 2: 12})) + tm.assert_sp_array_equal(result, expected) + + # function + result = arr.map(pd.Series({0: 10, 1: 11, 2: 12})) + expected = SparseArray([10, 11, 12], fill_value=10) + tm.assert_sp_array_equal(result, expected) + + +def test_map_missing(): + arr = SparseArray([0, 1, 2]) + expected = SparseArray([10, 11, None], fill_value=10) + + result = arr.map({0: 10, 1: 11}) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [np.nan, 1]) +def test_dropna(fill_value): + # GH-28287 + arr = SparseArray([np.nan, 1], fill_value=fill_value) + exp = SparseArray([1.0], fill_value=fill_value) + tm.assert_sp_array_equal(arr.dropna(), exp) + + df = pd.DataFrame({"a": [0, 1], "b": arr}) + expected_df = pd.DataFrame({"a": [1], "b": exp}, index=pd.Index([1])) + tm.assert_equal(df.dropna(), expected_df) + + +def test_drop_duplicates_fill_value(): + # GH 11726 + df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0)) + result = df.drop_duplicates() + expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)}) + tm.assert_frame_equal(result, expected) + + +def test_zero_sparse_column(): + # GH 27781 + df1 = pd.DataFrame({"A": SparseArray([0, 0, 0]), "B": [1, 2, 3]}) + df2 = pd.DataFrame({"A": SparseArray([0, 1, 0]), "B": [1, 2, 3]}) + result = df1.loc[df1["B"] != 2] + expected = df2.loc[df2["B"] != 2] + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame({"A": SparseArray([0, 0]), "B": [1, 3]}, 
index=[0, 2]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..83a507e679d460d687b27d09a4bed8321e1199e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py @@ -0,0 +1,133 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +from pandas import ( + SparseDtype, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestAstype: + def test_astype(self): + # float -> float + arr = SparseArray([None, None, 0, 2]) + result = arr.astype("Sparse[float32]") + expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32")) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("float64", fill_value=0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("int64", 0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + arr = SparseArray([0, np.nan, 0, 1], fill_value=0) + with pytest.raises(ValueError, match="NA"): + arr.astype("Sparse[i8]") + + def test_astype_bool(self): + a = SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0)) + result = a.astype(bool) + expected = np.array([1, 0, 0, 1], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # update fill value + result = a.astype(SparseDtype(bool, False)) + expected = SparseArray( + [True, False, False, True], dtype=SparseDtype(bool, False) + ) + tm.assert_sp_array_equal(result, expected) + + def test_astype_all(self, any_real_numpy_dtype): + vals = np.array([1, 2, 3]) + arr = SparseArray(vals, fill_value=1) + typ = np.dtype(any_real_numpy_dtype) + res = arr.astype(typ) + tm.assert_numpy_array_equal(res, vals.astype(any_real_numpy_dtype)) + + @pytest.mark.parametrize( + "arr, dtype, expected", + [ + ( + SparseArray([0, 1]), + "float", + SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)), + ), + (SparseArray([0, 1]), bool, SparseArray([False, True])), + ( + SparseArray([0, 1], fill_value=1), + bool, + SparseArray([False, True], dtype=SparseDtype(bool, True)), + ), + pytest.param( + SparseArray([0, 1]), + "datetime64[ns]", + SparseArray( + np.array([0, 1], dtype="datetime64[ns]"), + dtype=SparseDtype("datetime64[ns]", Timestamp("1970")), + ), + ), + ( + SparseArray([0, 1, 10]), + str, + SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")), + ), + (SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])), + ( + SparseArray([0, 1, 0]), + object, + SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)), + ), + ], + ) + def test_astype_more(self, arr, dtype, expected): + result = arr.astype(arr.dtype.update_dtype(dtype)) + tm.assert_sp_array_equal(result, expected) + + def test_astype_nan_raises(self): + arr = SparseArray([1.0, np.nan]) + with pytest.raises(ValueError, match="Cannot convert non-finite"): + arr.astype(int) + + def test_astype_copy_false(self): + # GH#34456 bug caused by using .view instead of .astype in astype_nansafe + arr = SparseArray([1, 2, 3]) + + dtype = SparseDtype(float, 0) + + result = arr.astype(dtype, copy=False) + expected = SparseArray([1.0, 2.0, 
3.0], fill_value=0.0) + tm.assert_sp_array_equal(result, expected) + + def test_astype_dt64_to_int64(self): + # GH#49631 match non-sparse behavior + values = np.array(["NaT", "2016-01-02", "2016-01-03"], dtype="M8[ns]") + + arr = SparseArray(values) + result = arr.astype("int64") + expected = values.astype("int64") + tm.assert_numpy_array_equal(result, expected) + + # we should also be able to cast to equivalent Sparse[int64] + dtype_int64 = SparseDtype("int64", np.iinfo(np.int64).min) + result2 = arr.astype(dtype_int64) + tm.assert_numpy_array_equal(result2.to_numpy(), expected) + + # GH#50087 we should match the non-sparse behavior regardless of + # if we have a fill_value other than NaT + dtype = SparseDtype("datetime64[ns]", values[1]) + arr3 = SparseArray(values, dtype=dtype) + result3 = arr3.astype("int64") + tm.assert_numpy_array_equal(result3, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..0f09af269148bc6fec712b9b1df63cca6f44d248 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py @@ -0,0 +1,62 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestSparseArrayConcat: + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_basic(self, kind): + a = SparseArray([1, 0, 0, 2], kind=kind) + b = SparseArray([1, 0, 2, 2], kind=kind) + + result = SparseArray._concat_same_type([a, b]) + # Can't make any assertions about the sparse index itself + # since we aren't don't merge sparse blocs across arrays + # in to_concat + expected = np.array([1, 2, 1, 2, 2], dtype="int64") + tm.assert_numpy_array_equal(result.sp_values, expected) + assert result.kind == kind + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_uses_first_kind(self, kind): + other = "integer" if kind == "block" else "block" + a = SparseArray([1, 0, 0, 2], kind=kind) + b = SparseArray([1, 0, 2, 2], kind=other) + + result = SparseArray._concat_same_type([a, b]) + expected = np.array([1, 2, 1, 2, 2], dtype="int64") + tm.assert_numpy_array_equal(result.sp_values, expected) + assert result.kind == kind + + +@pytest.mark.parametrize( + "other, expected_dtype", + [ + # compatible dtype -> preserve sparse + (pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)), + # (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)), + # incompatible dtype -> Sparse[common dtype] + (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)), + # incompatible dtype -> Sparse[object] dtype + (pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)), + # categorical with compatible categories -> dtype of the categories + (pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")), + (pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")), + # categorical with incompatible categories -> object dtype + (pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)), + ], +) +def test_concat_with_non_sparse(other, expected_dtype): + # https://github.com/pandas-dev/pandas/issues/34336 + s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0)) + + result = pd.concat([s_sparse, other], ignore_index=True) + expected = pd.Series(list(s_sparse) + list(other)).astype(expected_dtype) + 
tm.assert_series_equal(result, expected) + + result = pd.concat([other, s_sparse], ignore_index=True) + expected = pd.Series(list(other) + list(s_sparse)).astype(expected_dtype) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..2831c8abdaf137b6454ea8f73bff7e94a3ec1b2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py @@ -0,0 +1,285 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestConstructors: + def test_constructor_dtype(self): + arr = SparseArray([np.nan, 1, 2, np.nan]) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert arr.dtype.subtype == np.float64 + assert np.isnan(arr.fill_value) + + arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0) + assert arr.dtype == SparseDtype(np.float64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=np.float64) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert np.isnan(arr.fill_value) + + arr = SparseArray([0, 1, 2, 4], dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + def test_constructor_dtype_str(self): + result = SparseArray([1, 2, 3], dtype="int") + expected = SparseArray([1, 2, 3], dtype=int) + tm.assert_sp_array_equal(result, expected) + + def test_constructor_sparse_dtype(self): + result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1)) + expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int64") + + def test_constructor_sparse_dtype_str(self): + result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]") + expected = SparseArray([1, 0, 0, 1], dtype=np.int32) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int32") + + def test_constructor_object_dtype(self): + # GH#11856 + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object) + assert arr.dtype == SparseDtype(object) + assert np.isnan(arr.fill_value) + + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A") + assert arr.dtype == SparseDtype(object, "A") + assert arr.fill_value == "A" + + def test_constructor_object_dtype_bool_fill(self): + # GH#17574 + data = [False, 0, 100.0, 0.0] + arr = SparseArray(data, dtype=object, fill_value=False) + assert arr.dtype == SparseDtype(object, False) + assert arr.fill_value is False + arr_expected = np.array(data, dtype=object) + it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected)) + assert np.fromiter(it, dtype=np.bool_).all() + + @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int]) + def test_constructor_na_dtype(self, dtype): + with pytest.raises(ValueError, match="Cannot 
convert"): + SparseArray([0, 1, np.nan], dtype=dtype) + + def test_constructor_warns_when_losing_timezone(self): + # GH#32501 warn when losing timezone information + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + + expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]")) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(dti) + + tm.assert_sp_array_equal(result, expected) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(pd.Series(dti)) + + tm.assert_sp_array_equal(result, expected) + + def test_constructor_spindex_dtype(self): + arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2])) + # TODO: actionable? + # XXX: Behavior change: specifying SparseIndex no longer changes the + # fill_value + expected = SparseArray([0, 1, 2, 0], kind="integer") + tm.assert_sp_array_equal(arr, expected) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=np.int64, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64 + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=None, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])]) + def test_constructor_spindex_dtype_scalar(self, sparse_index): + # scalar input + msg = "Constructing SparseArray with scalar data is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + def test_constructor_spindex_dtype_scalar_broadcasts(self): + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize( + "data, fill_value", + [ + (np.array([1, 2]), 0), + (np.array([1.0, 2.0]), np.nan), + ([True, False], False), + ([pd.Timestamp("2017-01-01")], pd.NaT), + ], + ) + def test_constructor_inferred_fill_value(self, data, fill_value): + result = SparseArray(data).fill_value + + if isna(fill_value): + assert isna(result) + else: + assert result == fill_value + + @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + @pytest.mark.parametrize("size", [0, 10]) + def test_from_spmatrix(self, size, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(size, 1, density=0.5, format=format) + 
result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + def test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 1, density=0.5, format=format) + mat.data[0] = 0 + result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + def test_from_spmatrix_raises(self): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.eye(5, 4, format="csc") + + with pytest.raises(ValueError, match="not '4'"): + SparseArray.from_spmatrix(mat) + + def test_constructor_from_too_large_array(self): + with pytest.raises(TypeError, match="expected dimension <= 1 data"): + SparseArray(np.arange(10).reshape((2, 5))) + + def test_constructor_from_sparse(self): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + res = SparseArray(zarr) + assert res.fill_value == 0 + tm.assert_almost_equal(res.sp_values, zarr.sp_values) + + def test_constructor_copy(self): + arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + arr = SparseArray(arr_data) + + cp = SparseArray(arr, copy=True) + cp.sp_values[:3] = 0 + assert not (arr.sp_values[:3] == 0).any() + + not_copy = SparseArray(arr) + not_copy.sp_values[:3] = 0 + assert (arr.sp_values[:3] == 0).all() + + def test_constructor_bool(self): + # GH#10648 + data = np.array([False, False, True, True, False, False]) + arr = SparseArray(data, fill_value=False, dtype=bool) + + assert arr.dtype == SparseDtype(bool) + tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True])) + # Behavior change: np.asarray densifies. + # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32)) + + dense = arr.to_dense() + assert dense.dtype == bool + tm.assert_numpy_array_equal(dense, data) + + def test_constructor_bool_fill_value(self): + arr = SparseArray([True, False, True], dtype=None) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True) + assert arr.dtype == SparseDtype(np.bool_, True) + assert arr.fill_value + + def test_constructor_float32(self): + # GH#10648 + data = np.array([1.0, np.nan, 3], dtype=np.float32) + arr = SparseArray(data, dtype=np.float32) + + assert arr.dtype == SparseDtype(np.float32) + tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32)) + # Behavior change: np.asarray densifies. 
+ # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal( + arr.sp_index.indices, np.array([0, 2], dtype=np.int32) + ) + + dense = arr.to_dense() + assert dense.dtype == np.float32 + tm.assert_numpy_array_equal(dense, data) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..234f4092421e592b9c11b668e25f29fa108f13f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py @@ -0,0 +1,224 @@ +import re +import warnings + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", 0), + ("float", np.nan), + ("bool", False), + ("object", np.nan), + ("datetime64[ns]", np.datetime64("NaT", "ns")), + ("timedelta64[ns]", np.timedelta64("NaT", "ns")), + ], +) +def test_inferred_dtype(dtype, fill_value): + sparse_dtype = SparseDtype(dtype) + result = sparse_dtype.fill_value + if pd.isna(fill_value): + assert pd.isna(result) and type(result) == type(fill_value) + else: + assert result == fill_value + + +def test_from_sparse_dtype(): + dtype = SparseDtype("float", 0) + result = SparseDtype(dtype) + assert result.fill_value == 0 + + +def test_from_sparse_dtype_fill_value(): + dtype = SparseDtype("int", 1) + result = SparseDtype(dtype, fill_value=2) + expected = SparseDtype("int", 2) + assert result == expected + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", None), + ("float", None), + ("bool", None), + ("object", None), + ("datetime64[ns]", None), + ("timedelta64[ns]", None), + ("int", np.nan), + ("float", 0), + ], +) +def test_equal(dtype, fill_value): + a = SparseDtype(dtype, fill_value) + b = SparseDtype(dtype, fill_value) + assert a == b + assert b == a + + +def test_nans_equal(): + a = SparseDtype(float, float("nan")) + b = SparseDtype(float, np.nan) + assert a == b + assert b == a + + +with warnings.catch_warnings(): + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + warnings.filterwarnings("ignore", msg, category=FutureWarning) + + tups = [ + (SparseDtype("float64"), SparseDtype("float32")), + (SparseDtype("float64"), SparseDtype("float64", 0)), + (SparseDtype("float64"), SparseDtype("datetime64[ns]", np.nan)), + (SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)), + (SparseDtype("float64"), np.dtype("float64")), + ] + + +@pytest.mark.parametrize( + "a, b", + tups, +) +def test_not_equal(a, b): + assert a != b + + +def test_construct_from_string_raises(): + with pytest.raises( + TypeError, match="Cannot construct a 'SparseDtype' from 'not a dtype'" + ): + SparseDtype.construct_from_string("not a dtype") + + +@pytest.mark.parametrize( + "dtype, expected", + [ + (SparseDtype(int), True), + (SparseDtype(float), True), + (SparseDtype(bool), True), + (SparseDtype(object), False), + (SparseDtype(str), False), + ], +) +def test_is_numeric(dtype, expected): + assert dtype._is_numeric is expected + + +def test_str_uses_object(): + result = SparseDtype(str).subtype + assert result == np.dtype("object") + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[float64]", SparseDtype(np.dtype("float64"))), + ("Sparse[float32]", SparseDtype(np.dtype("float32"))), + ("Sparse[int]", SparseDtype(np.dtype("int"))), + ("Sparse[str]", SparseDtype(np.dtype("str"))), + ("Sparse[datetime64[ns]]", 
SparseDtype(np.dtype("datetime64[ns]"))), + ("Sparse", SparseDtype(np.dtype("float"), np.nan)), + ], +) +def test_construct_from_string(string, expected): + result = SparseDtype.construct_from_string(string) + assert result == expected + + +@pytest.mark.parametrize( + "a, b, expected", + [ + (SparseDtype(float, 0.0), SparseDtype(np.dtype("float"), 0.0), True), + (SparseDtype(int, 0), SparseDtype(int, 0), True), + (SparseDtype(float, float("nan")), SparseDtype(float, np.nan), True), + (SparseDtype(float, 0), SparseDtype(float, np.nan), False), + (SparseDtype(int, 0.0), SparseDtype(float, 0.0), False), + ], +) +def test_hash_equal(a, b, expected): + result = a == b + assert result is expected + + result = hash(a) == hash(b) + assert result is expected + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[int]", "int"), + ("Sparse[int, 0]", "int"), + ("Sparse[int64]", "int64"), + ("Sparse[int64, 0]", "int64"), + ("Sparse[datetime64[ns], 0]", "datetime64[ns]"), + ], +) +def test_parse_subtype(string, expected): + subtype, _ = SparseDtype._parse_subtype(string) + assert subtype == expected + + +@pytest.mark.parametrize( + "string", ["Sparse[int, 1]", "Sparse[float, 0.0]", "Sparse[bool, True]"] +) +def test_construct_from_string_fill_value_raises(string): + with pytest.raises(TypeError, match="fill_value in the string is not"): + SparseDtype.construct_from_string(string) + + +@pytest.mark.parametrize( + "original, dtype, expected", + [ + (SparseDtype(int, 0), float, SparseDtype(float, 0.0)), + (SparseDtype(int, 1), float, SparseDtype(float, 1.0)), + (SparseDtype(int, 1), str, SparseDtype(object, "1")), + (SparseDtype(float, 1.5), int, SparseDtype(int, 1)), + ], +) +def test_update_dtype(original, dtype, expected): + result = original.update_dtype(dtype) + assert result == expected + + +@pytest.mark.parametrize( + "original, dtype, expected_error_msg", + [ + ( + SparseDtype(float, np.nan), + int, + re.escape("Cannot convert non-finite values (NA or inf) to integer"), + ), + ( + SparseDtype(str, "abc"), + int, + r"invalid literal for int\(\) with base 10: ('abc'|np\.str_\('abc'\))", + ), + ], +) +def test_update_dtype_raises(original, dtype, expected_error_msg): + with pytest.raises(ValueError, match=expected_error_msg): + original.update_dtype(dtype) + + +def test_repr(): + # GH-34352 + result = str(SparseDtype("int64", fill_value=0)) + expected = "Sparse[int64, 0]" + assert result == expected + + result = str(SparseDtype(object, fill_value="0")) + expected = "Sparse[object, '0']" + assert result == expected + + +def test_sparse_dtype_subtype_must_be_numpy_dtype(): + # GH#53160 + msg = "SparseDtype subtype must be a numpy dtype" + with pytest.raises(TypeError, match=msg): + SparseDtype("category", fill_value="c") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..60029ac06ddb47ef0ad4ee35a75fd09ca12f7f53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py @@ -0,0 +1,302 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + return SparseArray(arr_data) + + +class TestGetitem: + def 
test_getitem(self, arr): + dense = arr.to_dense() + for i, value in enumerate(arr): + tm.assert_almost_equal(value, dense[i]) + tm.assert_almost_equal(arr[-i], dense[-i]) + + def test_getitem_arraylike_mask(self, arr): + arr = SparseArray([0, 1, 2]) + result = arr[[True, False, True]] + expected = SparseArray([0, 2]) + tm.assert_sp_array_equal(result, expected) + + @pytest.mark.parametrize( + "slc", + [ + np.s_[:], + np.s_[1:10], + np.s_[1:100], + np.s_[10:1], + np.s_[:-3], + np.s_[-5:-4], + np.s_[:-12], + np.s_[-12:], + np.s_[2:], + np.s_[2::3], + np.s_[::2], + np.s_[::-1], + np.s_[::-2], + np.s_[1:6:2], + np.s_[:-6:-2], + ], + ) + @pytest.mark.parametrize( + "as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []] + ) + def test_getslice(self, slc, as_dense): + as_dense = np.array(as_dense) + arr = SparseArray(as_dense) + + result = arr[slc] + expected = SparseArray(as_dense[slc]) + + tm.assert_sp_array_equal(result, expected) + + def test_getslice_tuple(self): + dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0]) + + sparse = SparseArray(dense) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:]) + tm.assert_sp_array_equal(res, exp) + + sparse = SparseArray(dense, fill_value=0) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + msg = "too many indices for array" + with pytest.raises(IndexError, match=msg): + sparse[4:, :] + + with pytest.raises(IndexError, match=msg): + # check numpy compat + dense[4:, :] + + def test_boolean_slice_empty(self): + arr = SparseArray([0, 1, 2]) + res = arr[[False, False, False]] + assert res.dtype == arr.dtype + + def test_getitem_bool_sparse_array(self, arr): + # GH 23122 + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + exp = SparseArray([np.nan, 2, np.nan, 5, 6]) + tm.assert_sp_array_equal(arr[spar_bool], exp) + + spar_bool = ~spar_bool + res = arr[spar_bool] + exp = SparseArray([np.nan, 1, 3, 4, np.nan]) + tm.assert_sp_array_equal(res, exp) + + spar_bool = SparseArray( + [False, True, np.nan] * 3, dtype=np.bool_, fill_value=np.nan + ) + res = arr[spar_bool] + exp = SparseArray([np.nan, 3, 5]) + tm.assert_sp_array_equal(res, exp) + + def test_getitem_bool_sparse_array_as_comparison(self): + # GH 45110 + arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan) + res = arr[arr > 2] + exp = SparseArray([3.0, 4.0], fill_value=np.nan) + tm.assert_sp_array_equal(res, exp) + + def test_get_item(self, arr): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + assert np.isnan(arr[1]) + assert arr[2] == 1 + assert arr[7] == 5 + + assert zarr[0] == 0 + assert zarr[2] == 1 + assert zarr[7] == 5 + + errmsg = "must be an integer between -10 and 10" + + with pytest.raises(IndexError, match=errmsg): + arr[11] + + with pytest.raises(IndexError, match=errmsg): + arr[-11] + + assert arr[-1] == arr[len(arr) - 1] + + +class TestSetitem: + def test_set_item(self, arr_data): + arr = SparseArray(arr_data).copy() + + def setitem(): + arr[5] = 3 + + def setslice(): + arr[1:5] = 2 + + with pytest.raises(TypeError, match="assignment via setitem"): + setitem() + + with pytest.raises(TypeError, match="assignment via setitem"): + setslice() + + +class TestTake: + def test_take_scalar_raises(self, arr): + msg = "'indices' must be an array, not a scalar '2'." 
+ with pytest.raises(ValueError, match=msg): + arr.take(2) + + def test_take(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [2, 3])) + tm.assert_sp_array_equal(arr.take([2, 3]), exp) + + exp = SparseArray(np.take(arr_data, [0, 1, 2])) + tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp) + + def test_take_all_empty(self): + sparse = pd.array([0, 0], dtype=SparseDtype("int64")) + result = sparse.take([0, 1], allow_fill=True, fill_value=np.nan) + tm.assert_sp_array_equal(sparse, result) + + def test_take_different_fill_value(self): + # Take with a different fill value shouldn't overwrite the original + sparse = pd.array([0.0], dtype=SparseDtype("float64", fill_value=0.0)) + result = sparse.take([0, -1], allow_fill=True, fill_value=np.nan) + expected = pd.array([0, np.nan], dtype=sparse.dtype) + tm.assert_sp_array_equal(expected, result) + + def test_take_fill_value(self): + data = np.array([1, np.nan, 0, 3, 0]) + sparse = SparseArray(data, fill_value=0) + + exp = SparseArray(np.take(data, [0]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([0]), exp) + + exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp) + + def test_take_negative(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [-1])) + tm.assert_sp_array_equal(arr.take([-1]), exp) + + exp = SparseArray(np.take(arr_data, [-4, -3, -2])) + tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp) + + def test_bad_take(self, arr): + with pytest.raises(IndexError, match="bounds"): + arr.take([11]) + + def test_take_filling(self): + # similar tests as GH 12631 + sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4]) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + # TODO: actionable? + # XXX: test change: fill_value=True -> allow_fill=True + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + expected = SparseArray([np.nan, np.nan, np.nan]) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'" + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), allow_fill=True) + + def test_take_filling_fill_value(self): + # same tests as GH#12631 + sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # fill_value + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + # TODO: actionable? + # XXX: behavior change. + # the old way of filling self.fill_value doesn't follow EA rules. 
+ # It's supposed to be self.dtype.na_value (nan in this case) + expected = SparseArray([0, np.nan, np.nan], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'." + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + @pytest.mark.parametrize("kind", ["block", "integer"]) + def test_take_filling_all_nan(self, kind): + sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + result = sparse.take(np.array([1, 0, -1]), fill_value=True) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + +class TestWhere: + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py new file mode 100644 index 0000000000000000000000000000000000000000..7a77a2064e7e097f924a3901994d131d98164ad6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py @@ -0,0 +1,551 @@ +import operator + +import numpy as np +import pytest + +import pandas._libs.sparse as splib +import pandas.util._test_decorators as td + +from pandas import Series +import pandas._testing as tm +from pandas.core.arrays.sparse import ( + BlockIndex, + IntIndex, + make_sparse_index, +) + + +@pytest.fixture +def test_length(): + return 20 + + +@pytest.fixture( + params=[ + [ + [0, 7, 15], + [3, 5, 5], + [2, 9, 14], + [2, 3, 5], + [2, 9, 15], + [1, 3, 4], + ], + [ + [0, 5], + [4, 4], + [1], + [4], + [1], + [3], + ], + [ + [0], + [10], + [0, 5], + [3, 7], + [0, 5], + [3, 5], + ], + [ + [10], + [5], + [0, 12], + [5, 3], + [12], + [3], + ], + [ + [0, 10], + [4, 6], + [5, 17], + [4, 2], + [], + [], + ], + [ + [0], + [5], + [], + [], + [], + [], + ], + ], + ids=[ + "plain_case", + "delete_blocks", + "split_blocks", + "skip_block", + "no_intersect", + "one_empty", + ], +) +def cases(request): + return request.param + + +class TestSparseIndexUnion: + @pytest.mark.parametrize( + "xloc, xlen, yloc, ylen, eloc, elen", + [ + [[0], [5], [5], [4], [0], 
[9]], + [[0, 10], [5, 5], [2, 17], [5, 2], [0, 10, 17], [7, 5, 2]], + [[1], [5], [3], [5], [1], [7]], + [[2, 10], [4, 4], [4], [8], [2], [12]], + [[0, 5], [3, 5], [0], [7], [0], [10]], + [[2, 10], [4, 4], [4, 13], [8, 4], [2], [15]], + [[2], [15], [4, 9, 14], [3, 2, 2], [2], [15]], + [[0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]], + ], + ) + def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen, test_length): + # Case 1 + # x: ---- + # y: ---- + # r: -------- + # Case 2 + # x: ----- ----- + # y: ----- -- + # Case 3 + # x: ------ + # y: ------- + # r: ---------- + # Case 4 + # x: ------ ----- + # y: ------- + # r: ------------- + # Case 5 + # x: --- ----- + # y: ------- + # r: ------------- + # Case 6 + # x: ------ ----- + # y: ------- --- + # r: ------------- + # Case 7 + # x: ---------------------- + # y: ---- ---- --- + # r: ---------------------- + # Case 8 + # x: ---- --- + # y: --- --- + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + bresult = xindex.make_union(yindex) + assert isinstance(bresult, BlockIndex) + tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32)) + tm.assert_numpy_array_equal(bresult.blengths, np.array(elen, dtype=np.int32)) + + ixindex = xindex.to_int_index() + iyindex = yindex.to_int_index() + iresult = ixindex.make_union(iyindex) + assert isinstance(iresult, IntIndex) + tm.assert_numpy_array_equal(iresult.indices, bresult.to_int_index().indices) + + def test_int_index_make_union(self): + a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1], dtype=np.int32)) + b = IntIndex(4, np.array([0, 1], dtype=np.int32)) + + msg = "Indices must reference same underlying length" + with pytest.raises(ValueError, match=msg): + a.make_union(b) + + +class TestSparseIndexIntersect: + @td.skip_if_windows + def test_intersect(self, cases, test_length): + xloc, xlen, yloc, ylen, eloc, elen = cases + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + expected = BlockIndex(test_length, eloc, elen) + longer_index = BlockIndex(test_length + 1, yloc, ylen) + + result = xindex.intersect(yindex) + assert result.equals(expected) + result = xindex.to_int_index().intersect(yindex.to_int_index()) + assert result.equals(expected.to_int_index()) + + msg = "Indices must reference same underlying length" + with pytest.raises(Exception, match=msg): + xindex.intersect(longer_index) + with pytest.raises(Exception, match=msg): + xindex.to_int_index().intersect(longer_index.to_int_index()) + + def test_intersect_empty(self): + xindex = IntIndex(4, np.array([], dtype=np.int32)) + yindex = IntIndex(4, np.array([2, 3], dtype=np.int32)) + assert 
xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + xindex = xindex.to_block_index() + yindex = yindex.to_block_index() + assert xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + @pytest.mark.parametrize( + "case", + [ + # Argument 2 to "IntIndex" has incompatible type "ndarray[Any, + # dtype[signedinteger[_32Bit]]]"; expected "Sequence[int]" + IntIndex(5, np.array([1, 2], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(5, np.array([0, 2, 4], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(0, np.array([], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(5, np.array([], dtype=np.int32)), # type: ignore[arg-type] + ], + ) + def test_intersect_identical(self, case): + assert case.intersect(case).equals(case) + case = case.to_block_index() + assert case.intersect(case).equals(case) + + +class TestSparseIndexCommon: + def test_int_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) + + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) + assert isinstance(idx, IntIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) + + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == -1 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 0 + assert idx.lookup(3) == 1 + assert idx.lookup(4) == -1 + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + + for i in range(-1, 5): + assert idx.lookup(i) == -1 + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == 1 + assert idx.lookup(2) == 2 + assert idx.lookup(3) == 3 + assert idx.lookup(4) == -1 + + idx = 
make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 1 + assert idx.lookup(3) == 2 + assert idx.lookup(4) == -1 + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup_array(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = np.array([-1, -1, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 0, -1, 1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32)) + exp = np.array([-1, -1, -1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = np.array([-1, 0, 2], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 2, 1, 3], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32)) + exp = np.array([1, -1, 2, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32)) + exp = np.array([-1, -1, 1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + @pytest.mark.parametrize( + "idx, expected", + [ + [0, -1], + [5, 0], + [7, 2], + [8, -1], + [9, -1], + [10, -1], + [11, -1], + [12, 3], + [17, 8], + [18, -1], + ], + ) + def test_lookup_basics(self, idx, expected): + bindex = BlockIndex(20, [5, 12], [3, 6]) + assert bindex.lookup(idx) == expected + + iindex = bindex.to_int_index() + assert iindex.lookup(idx) == expected + + +class TestBlockIndex: + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("i", [5, 10, 100, 101]) + def test_make_block_boundary(self, i): + idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block") + + exp = np.arange(0, i, 2, dtype=np.int32) + 
tm.assert_numpy_array_equal(idx.blocs, exp) + tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32)) + + def test_equals(self): + index = BlockIndex(10, [0, 4], [2, 5]) + + assert index.equals(index) + assert not index.equals(BlockIndex(10, [0, 4], [2, 6])) + + def test_check_integrity(self): + locs = [] + lengths = [] + + # 0-length OK + BlockIndex(0, locs, lengths) + + # also OK even though empty + BlockIndex(1, locs, lengths) + + msg = "Block 0 extends beyond end" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [5], [10]) + + msg = "Block 0 overlaps" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [2, 5], [5, 3]) + + def test_to_int_index(self): + locs = [0, 10] + lengths = [4, 6] + exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15] + + block = BlockIndex(20, locs, lengths) + dense = block.to_int_index() + + tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32)) + + def test_to_block_index(self): + index = BlockIndex(10, [0, 5], [4, 5]) + assert index.to_block_index() is index + + +class TestIntIndex: + def test_check_integrity(self): + # Too many indices than specified in self.length + msg = "Too many indices" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=1, indices=[1, 2, 3]) + + # No index can be negative. + msg = "No index can be less than zero" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, -2, 3]) + + # No index can be negative. + msg = "No index can be less than zero" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, -2, 3]) + + # All indices must be less than the length. + msg = "All indices must be less than the length" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 2, 5]) + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 2, 6]) + + # Indices must be strictly ascending. 
+ msg = "Indices must be strictly increasing" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 3, 2]) + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 3, 3]) + + def test_int_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) + + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) + assert isinstance(idx, IntIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) + + def test_equals(self): + index = IntIndex(10, [0, 1, 2, 3, 4]) + assert index.equals(index) + assert not index.equals(IntIndex(10, [0, 1, 2, 3])) + + def test_to_block_index(self, cases, test_length): + xloc, xlen, yloc, ylen, _, _ = cases + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + + # see if survive the round trip + xbindex = xindex.to_int_index().to_block_index() + ybindex = yindex.to_int_index().to_block_index() + assert isinstance(xbindex, BlockIndex) + assert xbindex.equals(xindex) + assert ybindex.equals(yindex) + + def test_to_int_index(self): + index = IntIndex(10, [2, 3, 4, 5, 6]) + assert index.to_int_index() is index + + +class TestSparseOperators: + @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"]) + def test_op(self, opname, cases, test_length): + xloc, xlen, yloc, ylen, _, _ = cases + sparse_op = getattr(splib, f"sparse_{opname}_float64") + python_op = getattr(operator, opname) + + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + + xdindex = xindex.to_int_index() + ydindex = yindex.to_int_index() + + x = np.arange(xindex.npoints) * 10.0 + 1 + y = np.arange(yindex.npoints) * 100.0 + 1 + + xfill = 0 + yfill = 2 + + result_block_vals, rb_index, bfill = sparse_op( + x, xindex, xfill, y, yindex, yfill + ) + result_int_vals, ri_index, ifill = sparse_op( + x, xdindex, xfill, y, ydindex, yfill + ) + + assert rb_index.to_int_index().equals(ri_index) + tm.assert_numpy_array_equal(result_block_vals, result_int_vals) + assert bfill == ifill + + # check versus Series... 
+ xseries = Series(x, xdindex.indices) + xseries = xseries.reindex(np.arange(test_length)).fillna(xfill) + + yseries = Series(y, ydindex.indices) + yseries = yseries.reindex(np.arange(test_length)).fillna(yfill) + + series_result = python_op(xseries, yseries) + series_result = series_result.reindex(ri_index.indices) + + tm.assert_numpy_array_equal(result_block_vals, series_result.values) + tm.assert_numpy_array_equal(result_int_vals, series_result.values) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..f44423d5e635c3f74725db219d48fcaca27c4d53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py @@ -0,0 +1,306 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + SparseDtype, + Timestamp, + isna, +) +from pandas.core.arrays.sparse import SparseArray + + +class TestReductions: + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_all(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).all() + assert out + + out = SparseArray(data, fill_value=pos).all() + assert out + + data[1] = neg + out = SparseArray(data).all() + assert not out + + out = SparseArray(data, fill_value=pos).all() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_numpy_all(self, data, pos, neg): + # GH#17570 + out = np.all(SparseArray(data)) + assert out + + out = np.all(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.all(SparseArray(data)) + assert not out + + out = np.all(SparseArray(data, fill_value=pos)) + assert not out + + # raises with a different message on py2. 
+ msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.all(SparseArray(data), out=np.array([])) + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_any(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).any() + assert out + + out = SparseArray(data, fill_value=pos).any() + assert out + + data[1] = neg + out = SparseArray(data).any() + assert not out + + out = SparseArray(data, fill_value=pos).any() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_numpy_any(self, data, pos, neg): + # GH#17570 + out = np.any(SparseArray(data)) + assert out + + out = np.any(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.any(SparseArray(data)) + assert not out + + out = np.any(SparseArray(data, fill_value=pos)) + assert not out + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.any(SparseArray(data), out=out) + + def test_sum(self): + data = np.arange(10).astype(float) + out = SparseArray(data).sum() + assert out == 45.0 + + data[5] = np.nan + out = SparseArray(data, fill_value=2).sum() + assert out == 40.0 + + out = SparseArray(data, fill_value=np.nan).sum() + assert out == 40.0 + + @pytest.mark.parametrize( + "arr", + [np.array([0, 1, np.nan, 1]), np.array([0, 1, 1])], + ) + @pytest.mark.parametrize("fill_value", [0, 1, np.nan]) + @pytest.mark.parametrize("min_count, expected", [(3, 2), (4, np.nan)]) + def test_sum_min_count(self, arr, fill_value, min_count, expected): + # GH#25777 + sparray = SparseArray(arr, fill_value=fill_value) + result = sparray.sum(min_count=min_count) + if np.isnan(expected): + assert np.isnan(result) + else: + assert result == expected + + def test_bool_sum_min_count(self): + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + res = spar_bool.sum(min_count=1) + assert res == 5 + res = spar_bool.sum(min_count=11) + assert isna(res) + + def test_numpy_sum(self): + data = np.arange(10).astype(float) + out = np.sum(SparseArray(data)) + assert out == 45.0 + + data[5] = np.nan + out = np.sum(SparseArray(data, fill_value=2)) + assert out == 40.0 + + out = np.sum(SparseArray(data, fill_value=np.nan)) + assert out == 40.0 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), out=out) + + def test_mean(self): + data = np.arange(10).astype(float) + out = SparseArray(data).mean() + assert out == 4.5 + + data[5] = np.nan + out = SparseArray(data).mean() + assert out == 40.0 / 9 + + def test_numpy_mean(self): + data = np.arange(10).astype(float) + out = np.mean(SparseArray(data)) + assert out == 4.5 + + data[5] = np.nan + out = np.mean(SparseArray(data)) + assert out == 40.0 / 9 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), out=out) + + +class TestMinMax: + @pytest.mark.parametrize( + "raw_data,max_expected,min_expected", + [ + (np.arange(5.0), [4], [0]), + (-np.arange(5.0), [0], [-4]), 
+ (np.array([0, 1, 2, np.nan, 4]), [4], [0]), + (np.array([np.nan] * 5), [np.nan], [np.nan]), + (np.array([]), [np.nan], [np.nan]), + ], + ) + def test_nan_fill_value(self, raw_data, max_expected, min_expected): + arr = SparseArray(raw_data) + max_result = arr.max() + min_result = arr.min() + assert max_result in max_expected + assert min_result in min_expected + + max_result = arr.max(skipna=False) + min_result = arr.min(skipna=False) + if np.isnan(raw_data).any(): + assert np.isnan(max_result) + assert np.isnan(min_result) + else: + assert max_result in max_expected + assert min_result in min_expected + + @pytest.mark.parametrize( + "fill_value,max_expected,min_expected", + [ + (100, 100, 0), + (-100, 1, -100), + ], + ) + def test_fill_value(self, fill_value, max_expected, min_expected): + arr = SparseArray( + np.array([fill_value, 0, 1]), dtype=SparseDtype("int", fill_value) + ) + max_result = arr.max() + assert max_result == max_expected + + min_result = arr.min() + assert min_result == min_expected + + def test_only_fill_value(self): + fv = 100 + arr = SparseArray(np.array([fv, fv, fv]), dtype=SparseDtype("int", fv)) + assert len(arr._valid_sp_values) == 0 + + assert arr.max() == fv + assert arr.min() == fv + assert arr.max(skipna=False) == fv + assert arr.min(skipna=False) == fv + + @pytest.mark.parametrize("func", ["min", "max"]) + @pytest.mark.parametrize("data", [np.array([]), np.array([np.nan, np.nan])]) + @pytest.mark.parametrize( + "dtype,expected", + [ + (SparseDtype(np.float64, np.nan), np.nan), + (SparseDtype(np.float64, 5.0), np.nan), + (SparseDtype("datetime64[ns]", NaT), NaT), + (SparseDtype("datetime64[ns]", Timestamp("2018-05-05")), NaT), + ], + ) + def test_na_value_if_no_valid_values(self, func, data, dtype, expected): + arr = SparseArray(data, dtype=dtype) + result = getattr(arr, func)() + if expected is NaT: + # TODO: pin down whether we wrap datetime64("NaT") + assert result is NaT or np.isnat(result) + else: + assert np.isnan(result) + + +class TestArgmaxArgmin: + @pytest.mark.parametrize( + "arr,argmax_expected,argmin_expected", + [ + (SparseArray([1, 2, 0, 1, 2]), 1, 2), + (SparseArray([-1, -2, 0, -1, -2]), 2, 1), + (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2), + (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10), + (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=0), 0, 1), + (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=1), 0, 1), + ], + ) + def test_argmax_argmin(self, arr, argmax_expected, argmin_expected): + argmax_result = arr.argmax() + argmin_result = arr.argmin() + assert argmax_result == argmax_expected + assert argmin_result == argmin_expected + + @pytest.mark.parametrize( + "arr,method", + [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")], + ) + def test_empty_array(self, arr, method): + msg = f"attempt to get {method} of an empty sequence" + with pytest.raises(ValueError, match=msg): + arr.argmax() if method == "argmax" else arr.argmin() diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..c00a73773fdd4795e3d5d7f030a591a060dc3bfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py @@ -0,0 +1,79 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import SparseArray + + +@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning") +@pytest.mark.parametrize("fill_value", [0, np.nan]) +@pytest.mark.parametrize("op", [operator.pos, operator.neg]) +def test_unary_op(op, fill_value): + arr = np.array([0, 1, np.nan, 2]) + sparray = SparseArray(arr, fill_value=fill_value) + result = op(sparray) + expected = SparseArray(op(arr), fill_value=op(fill_value)) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [True, False]) +def test_invert(fill_value): + arr = np.array([True, False, False, True]) + sparray = SparseArray(arr, fill_value=fill_value) + result = ~sparray + expected = SparseArray(~arr, fill_value=not fill_value) + tm.assert_sp_array_equal(result, expected) + + result = ~pd.Series(sparray) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + result = ~pd.DataFrame({"A": sparray}) + expected = pd.DataFrame({"A": expected}) + tm.assert_frame_equal(result, expected) + + +class TestUnaryMethods: + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_neg_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, np.nan, -3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, -1, -3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_abs_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, 1, 3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + def test_invert_operator(self): + arr = SparseArray([False, True, False, True], fill_value=False, dtype=np.bool_) + exp = SparseArray( + np.invert([False, True, False, True]), fill_value=True, dtype=np.bool_ + ) + res = ~arr + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([0, 1, 0, 2, 3, 0], fill_value=0, dtype=np.int32) + res = ~arr + exp = SparseArray([-1, -2, -1, -3, -4, -1], fill_value=-1, dtype=np.int32) + tm.assert_sp_array_equal(exp, res) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6cacc4cc860d0d4c0ffe948274252daae2ee27 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/conftest.py @@ -0,0 +1,242 @@ +import shlex +import subprocess +import time +import uuid + +import pytest + +from pandas.compat import ( + is_ci_environment, + is_platform_arm, + is_platform_mac, + is_platform_windows, +) +import pandas.util._test_decorators as td + +import pandas.io.common as icom +from pandas.io.parsers import read_csv + + +@pytest.fixture +def compression_to_extension(): + return {value: key for key, value in icom.extension_to_compression.items()} + + +@pytest.fixture +def tips_file(datapath): + """Path to the tips dataset""" + return datapath("io", "data", "csv", "tips.csv") + + +@pytest.fixture +def jsonl_file(datapath): + """Path to a JSONL dataset""" + return datapath("io", "parser", "data", "items.jsonl") + + +@pytest.fixture +def salaries_table(datapath): + """DataFrame with the salaries dataset""" + return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t") + + +@pytest.fixture +def feather_file(datapath): + return datapath("io", "data", "feather", "feather-0_3_1.feather") + + +@pytest.fixture +def xml_file(datapath): + return datapath("io", "data", "xml", "books.xml") + + +@pytest.fixture +def s3_base(worker_id, monkeypatch): + """ + Fixture for mocking S3 interaction. + + Sets up moto server in separate process locally + Return url for motoserver/moto CI service + """ + pytest.importorskip("s3fs") + pytest.importorskip("boto3") + + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret") + if is_ci_environment(): + if is_platform_arm() or is_platform_mac() or is_platform_windows(): + # NOT RUN on Windows/macOS/ARM, only Ubuntu + # - subprocess in CI can cause timeouts + # - GitHub Actions do not support + # container services for the above OSs + # - CircleCI will probably hit the Docker rate pull limit + pytest.skip( + "S3 tests do not have a corresponding service in " + "Windows, macOS or ARM platforms" + ) + else: + # set in .github/workflows/unit-tests.yml + yield "http://localhost:5000" + else: + requests = pytest.importorskip("requests") + pytest.importorskip("moto") + pytest.importorskip("flask") # server mode needs flask too + + # Launching moto in server mode, i.e., as a separate process + # with an S3 endpoint on localhost + + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + endpoint_port = f"555{worker_id}" + endpoint_uri = f"http://127.0.0.1:{endpoint_port}/" + + # pipe to null to avoid logging in terminal + with subprocess.Popen( + shlex.split(f"moto_server s3 -p {endpoint_port}"), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) as proc: + timeout = 5 + while timeout > 0: + try: + # OK to go once server is accepting connections + r = requests.get(endpoint_uri) + if r.ok: + break + except Exception: + pass + timeout -= 0.1 + time.sleep(0.1) + yield endpoint_uri + + proc.terminate() + + +@pytest.fixture +def s3so(s3_base): + return {"client_kwargs": {"endpoint_url": s3_base}} + + +@pytest.fixture +def s3_resource(s3_base): + import boto3 + + s3 = boto3.resource("s3", endpoint_url=s3_base) + return s3 + + +@pytest.fixture +def s3_public_bucket(s3_resource): + bucket = s3_resource.Bucket(f"pandas-test-{uuid.uuid4()}") + bucket.create() + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_public_bucket_with_data( + 
s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_public_bucket.put_object(Key=s3_key, Body=f) + return s3_public_bucket + + +@pytest.fixture +def s3_private_bucket(s3_resource): + bucket = s3_resource.Bucket(f"cant_get_it-{uuid.uuid4()}") + bucket.create(ACL="private") + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_private_bucket_with_data( + s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_private_bucket.put_object(Key=s3_key, Body=f) + return s3_private_bucket + + +_compression_formats_params = [ + (".no_compress", None), + ("", None), + (".gz", "gzip"), + (".GZ", "gzip"), + (".bz2", "bz2"), + (".BZ2", "bz2"), + (".zip", "zip"), + (".ZIP", "zip"), + (".xz", "xz"), + (".XZ", "xz"), + pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")), + pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")), +] + + +@pytest.fixture(params=_compression_formats_params[1:]) +def compression_format(request): + return request.param + + +@pytest.fixture(params=_compression_formats_params) +def compression_ext(request): + return request.param[0] + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + """ + return request.param diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py new file mode 100644 index 0000000000000000000000000000000000000000..dd7b57df9baed18b172dc8398a61a49e9435f82a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py @@ -0,0 +1,72 @@ +import locale + +import pytest + +from pandas._config import detect_console_encoding + + +class MockEncoding: + """ + Used to add a side effect when accessing the 'encoding' property. If the + side effect is a str in nature, the value will be returned. Otherwise, the + side effect should be an exception that will be raised. 
+ """ + + def __init__(self, encoding) -> None: + super().__init__() + self.val = encoding + + @property + def encoding(self): + return self.raise_or_return(self.val) + + @staticmethod + def raise_or_return(val): + if isinstance(val, str): + return val + else: + raise val + + +@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]]) +def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled): + # Ensures that when sys.stdout.encoding or sys.stdin.encoding is used when + # they have values filled. + # GH 21552 + with monkeypatch.context() as context: + context.setattr(f"sys.{empty}", MockEncoding("")) + context.setattr(f"sys.{filled}", MockEncoding(filled)) + assert detect_console_encoding() == filled + + +@pytest.mark.parametrize("encoding", [AttributeError, OSError, "ascii"]) +def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding): + # GH 21552 + with monkeypatch.context() as context: + context.setattr("locale.getpreferredencoding", lambda: "foo") + context.setattr("sys.stdout", MockEncoding(encoding)) + assert detect_console_encoding() == "foo" + + +@pytest.mark.parametrize( + "std,locale", + [ + ["ascii", "ascii"], + ["ascii", locale.Error], + [AttributeError, "ascii"], + [AttributeError, locale.Error], + [OSError, "ascii"], + [OSError, locale.Error], + ], +) +def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale): + # When both the stdout/stdin encoding and locale preferred encoding checks + # fail (or return 'ascii', we should default to the sys default encoding. + # GH 21552 + with monkeypatch.context() as context: + context.setattr( + "locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale) + ) + context.setattr("sys.stdout", MockEncoding(std)) + context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding") + assert detect_console_encoding() == "sysDefaultEncoding" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..acf2bc72c687d44dd1769468d21fba1bb04443b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py @@ -0,0 +1,129 @@ +# Note! This file is aimed specifically at pandas.io.formats.printing utility +# functions, not the general printing of pandas objects. 
+import string + +import pandas._config.config as cf + +from pandas.io.formats import printing + + +def test_adjoin(): + data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]] + expected = "a dd ggg\nb ee hhh\nc ff iii" + + adjoined = printing.adjoin(2, *data) + + assert adjoined == expected + + +class TestPPrintThing: + def test_repr_binary_type(self): + letters = string.ascii_letters + try: + raw = bytes(letters, encoding=cf.get_option("display.encoding")) + except TypeError: + raw = bytes(letters) + b = str(raw.decode("utf-8")) + res = printing.pprint_thing(b, quote_strings=True) + assert res == repr(b) + res = printing.pprint_thing(b, quote_strings=False) + assert res == b + + def test_repr_obeys_max_seq_limit(self): + with cf.option_context("display.max_seq_items", 2000): + assert len(printing.pprint_thing(list(range(1000)))) > 1000 + + with cf.option_context("display.max_seq_items", 5): + assert len(printing.pprint_thing(list(range(1000)))) < 100 + + with cf.option_context("display.max_seq_items", 1): + assert len(printing.pprint_thing(list(range(1000)))) < 9 + + def test_repr_set(self): + assert printing.pprint_thing({1}) == "{1}" + + +class TestFormatBase: + def test_adjoin(self): + data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]] + expected = "a dd ggg\nb ee hhh\nc ff iii" + + adjoined = printing.adjoin(2, *data) + + assert adjoined == expected + + def test_adjoin_unicode(self): + data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "hhh", "いいい"]] + expected = "あ dd ggg\nb ええ hhh\nc ff いいい" + adjoined = printing.adjoin(2, *data) + assert adjoined == expected + + adj = printing._EastAsianTextAdjustment() + + expected = """あ dd ggg +b ええ hhh +c ff いいい""" + + adjoined = adj.adjoin(2, *data) + assert adjoined == expected + cols = adjoined.split("\n") + assert adj.len(cols[0]) == 13 + assert adj.len(cols[1]) == 13 + assert adj.len(cols[2]) == 16 + + expected = """あ dd ggg +b ええ hhh +c ff いいい""" + + adjoined = adj.adjoin(7, *data) + assert adjoined == expected + cols = adjoined.split("\n") + assert adj.len(cols[0]) == 23 + assert adj.len(cols[1]) == 23 + assert adj.len(cols[2]) == 26 + + def test_justify(self): + adj = printing._EastAsianTextAdjustment() + + def just(x, *args, **kwargs): + # wrapper to test single str + return adj.justify([x], *args, **kwargs)[0] + + assert just("abc", 5, mode="left") == "abc " + assert just("abc", 5, mode="center") == " abc " + assert just("abc", 5, mode="right") == " abc" + assert just("abc", 5, mode="left") == "abc " + assert just("abc", 5, mode="center") == " abc " + assert just("abc", 5, mode="right") == " abc" + + assert just("パンダ", 5, mode="left") == "パンダ" + assert just("パンダ", 5, mode="center") == "パンダ" + assert just("パンダ", 5, mode="right") == "パンダ" + + assert just("パンダ", 10, mode="left") == "パンダ " + assert just("パンダ", 10, mode="center") == " パンダ " + assert just("パンダ", 10, mode="right") == " パンダ" + + def test_east_asian_len(self): + adj = printing._EastAsianTextAdjustment() + + assert adj.len("abc") == 3 + assert adj.len("abc") == 3 + + assert adj.len("パンダ") == 6 + assert adj.len("パンダ") == 5 + assert adj.len("パンダpanda") == 11 + assert adj.len("パンダpanda") == 10 + + def test_ambiguous_width(self): + adj = printing._EastAsianTextAdjustment() + assert adj.len("¡¡ab") == 4 + + with cf.option_context("display.unicode.ambiguous_as_wide", True): + adj = printing._EastAsianTextAdjustment() + assert adj.len("¡¡ab") == 6 + + data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "¡¡ab", "いいい"]] + expected = "あ dd ggg \nb ええ 
¡¡ab\nc ff いいい" + adjoined = adj.adjoin(2, *data) + assert adjoined == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py new file mode 100644 index 0000000000000000000000000000000000000000..927a9f4961f6ff7ae51f74aceb0cb36dc6754c21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py @@ -0,0 +1,429 @@ +"""Tests formatting as writer-agnostic ExcelCells + +ExcelFormatter is tested implicitly in pandas/tests/io/excel +""" +import string + +import pytest + +from pandas.errors import CSSWarning + +import pandas._testing as tm + +from pandas.io.formats.excel import ( + CssExcelCell, + CSSToExcelConverter, +) + + +@pytest.mark.parametrize( + "css,expected", + [ + # FONT + # - name + ("font-family: foo,bar", {"font": {"name": "foo"}}), + ('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}), + ("font-family: foo,\nbar", {"font": {"name": "foo"}}), + ("font-family: foo, bar, baz", {"font": {"name": "foo"}}), + ("font-family: bar, foo", {"font": {"name": "bar"}}), + ("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}), + ("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}), + ('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}), + ('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}), + # - family + ("font-family: serif", {"font": {"name": "serif", "family": 1}}), + ("font-family: Serif", {"font": {"name": "serif", "family": 1}}), + ("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}), + ("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}), + ("font-family: roman, sans serif", {"font": {"name": "roman"}}), + ("font-family: roman, sansserif", {"font": {"name": "roman"}}), + ("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}), + ("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}), + # - size + ("font-size: 1em", {"font": {"size": 12}}), + ("font-size: xx-small", {"font": {"size": 6}}), + ("font-size: x-small", {"font": {"size": 7.5}}), + ("font-size: small", {"font": {"size": 9.6}}), + ("font-size: medium", {"font": {"size": 12}}), + ("font-size: large", {"font": {"size": 13.5}}), + ("font-size: x-large", {"font": {"size": 18}}), + ("font-size: xx-large", {"font": {"size": 24}}), + ("font-size: 50%", {"font": {"size": 6}}), + # - bold + ("font-weight: 100", {"font": {"bold": False}}), + ("font-weight: 200", {"font": {"bold": False}}), + ("font-weight: 300", {"font": {"bold": False}}), + ("font-weight: 400", {"font": {"bold": False}}), + ("font-weight: normal", {"font": {"bold": False}}), + ("font-weight: lighter", {"font": {"bold": False}}), + ("font-weight: bold", {"font": {"bold": True}}), + ("font-weight: bolder", {"font": {"bold": True}}), + ("font-weight: 700", {"font": {"bold": True}}), + ("font-weight: 800", {"font": {"bold": True}}), + ("font-weight: 900", {"font": {"bold": True}}), + # - italic + ("font-style: italic", {"font": {"italic": True}}), + ("font-style: oblique", {"font": {"italic": True}}), + # - underline + ("text-decoration: underline", {"font": {"underline": "single"}}), + ("text-decoration: overline", {}), + ("text-decoration: none", {}), + # - strike + ("text-decoration: line-through", {"font": {"strike": True}}), + ( + "text-decoration: underline line-through", + {"font": {"strike": True, "underline": "single"}}, + ), + ( + "text-decoration: underline; text-decoration: 
line-through", + {"font": {"strike": True}}, + ), + # - color + ("color: red", {"font": {"color": "FF0000"}}), + ("color: #ff0000", {"font": {"color": "FF0000"}}), + ("color: #f0a", {"font": {"color": "FF00AA"}}), + # - shadow + ("text-shadow: none", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}), + ("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}), + ("text-shadow: 0px -2em", {"font": {"shadow": True}}), + # FILL + # - color, fillType + ( + "background-color: red", + {"fill": {"fgColor": "FF0000", "patternType": "solid"}}, + ), + ( + "background-color: #ff0000", + {"fill": {"fgColor": "FF0000", "patternType": "solid"}}, + ), + ( + "background-color: #f0a", + {"fill": {"fgColor": "FF00AA", "patternType": "solid"}}, + ), + # BORDER + # - style + ( + "border-style: solid", + { + "border": { + "top": {"style": "medium"}, + "bottom": {"style": "medium"}, + "left": {"style": "medium"}, + "right": {"style": "medium"}, + } + }, + ), + ( + "border-style: solid; border-width: thin", + { + "border": { + "top": {"style": "thin"}, + "bottom": {"style": "thin"}, + "left": {"style": "thin"}, + "right": {"style": "thin"}, + } + }, + ), + ( + "border-top-style: solid; border-top-width: thin", + {"border": {"top": {"style": "thin"}}}, + ), + ( + "border-top-style: solid; border-top-width: 1pt", + {"border": {"top": {"style": "thin"}}}, + ), + ("border-top-style: solid", {"border": {"top": {"style": "medium"}}}), + ( + "border-top-style: solid; border-top-width: medium", + {"border": {"top": {"style": "medium"}}}, + ), + ( + "border-top-style: solid; border-top-width: 2pt", + {"border": {"top": {"style": "medium"}}}, + ), + ( + "border-top-style: solid; border-top-width: thick", + {"border": {"top": {"style": "thick"}}}, + ), + ( + "border-top-style: solid; border-top-width: 4pt", + {"border": {"top": {"style": "thick"}}}, + ), + ( + "border-top-style: dotted", + {"border": {"top": {"style": "mediumDashDotDot"}}}, + ), + ( + "border-top-style: dotted; border-top-width: thin", + {"border": {"top": {"style": "dotted"}}}, + ), + ("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}), + ( + "border-top-style: dashed; border-top-width: thin", + {"border": {"top": {"style": "dashed"}}}, + ), + ("border-top-style: double", {"border": {"top": {"style": "double"}}}), + # - color + ( + "border-style: solid; border-color: #0000ff", + { + "border": { + "top": {"style": "medium", "color": "0000FF"}, + "right": {"style": "medium", "color": "0000FF"}, + "bottom": {"style": "medium", "color": "0000FF"}, + "left": {"style": "medium", "color": "0000FF"}, + } + }, + ), + ( + "border-top-style: double; border-top-color: blue", + {"border": {"top": {"style": "double", "color": "0000FF"}}}, + ), + ( + "border-top-style: solid; border-top-color: #06c", + {"border": {"top": {"style": "medium", "color": "0066CC"}}}, + ), + ( + "border-top-color: blue", + {"border": {"top": {"color": "0000FF", "style": "none"}}}, + ), + # ALIGNMENT + # - horizontal + ("text-align: center", {"alignment": {"horizontal": "center"}}), + ("text-align: left", {"alignment": {"horizontal": "left"}}), + ("text-align: right", {"alignment": {"horizontal": 
"right"}}), + ("text-align: justify", {"alignment": {"horizontal": "justify"}}), + # - vertical + ("vertical-align: top", {"alignment": {"vertical": "top"}}), + ("vertical-align: text-top", {"alignment": {"vertical": "top"}}), + ("vertical-align: middle", {"alignment": {"vertical": "center"}}), + ("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}), + ("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}), + # - wrap_text + ("white-space: nowrap", {"alignment": {"wrap_text": False}}), + ("white-space: pre", {"alignment": {"wrap_text": False}}), + ("white-space: pre-line", {"alignment": {"wrap_text": False}}), + ("white-space: normal", {"alignment": {"wrap_text": True}}), + # NUMBER FORMAT + ("number-format: 0%", {"number_format": {"format_code": "0%"}}), + ( + "number-format: 0§[Red](0)§-§@;", + {"number_format": {"format_code": "0;[red](0);-;@"}}, # GH 46152 + ), + ], +) +def test_css_to_excel(css, expected): + convert = CSSToExcelConverter() + assert expected == convert(css) + + +def test_css_to_excel_multiple(): + convert = CSSToExcelConverter() + actual = convert( + """ + font-weight: bold; + text-decoration: underline; + color: red; + border-width: thin; + text-align: center; + vertical-align: top; + unused: something; + """ + ) + assert { + "font": {"bold": True, "underline": "single", "color": "FF0000"}, + "border": { + "top": {"style": "thin"}, + "right": {"style": "thin"}, + "bottom": {"style": "thin"}, + "left": {"style": "thin"}, + }, + "alignment": {"horizontal": "center", "vertical": "top"}, + } == actual + + +@pytest.mark.parametrize( + "css,inherited,expected", + [ + ("font-weight: bold", "", {"font": {"bold": True}}), + ("", "font-weight: bold", {"font": {"bold": True}}), + ( + "font-weight: bold", + "font-style: italic", + {"font": {"bold": True, "italic": True}}, + ), + ("font-style: normal", "font-style: italic", {"font": {"italic": False}}), + ("font-style: inherit", "", {}), + ( + "font-style: normal; font-style: inherit", + "font-style: italic", + {"font": {"italic": True}}, + ), + ], +) +def test_css_to_excel_inherited(css, inherited, expected): + convert = CSSToExcelConverter(inherited) + assert expected == convert(css) + + +@pytest.mark.parametrize( + "input_color,output_color", + ( + list(CSSToExcelConverter.NAMED_COLORS.items()) + + [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] + + [("#F0F", "FF00FF"), ("#ABC", "AABBCC")] + ), +) +def test_css_to_excel_good_colors(input_color, output_color): + # see gh-18392 + css = ( + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: {input_color}" + ) + + expected = {} + + expected["fill"] = {"patternType": "solid", "fgColor": output_color} + + expected["font"] = {"color": output_color} + + expected["border"] = { + k: {"color": output_color, "style": "none"} + for k in ("top", "right", "bottom", "left") + } + + with tm.assert_produces_warning(None): + convert = CSSToExcelConverter() + assert expected == convert(css) + + +@pytest.mark.parametrize("input_color", [None, "not-a-color"]) +def test_css_to_excel_bad_colors(input_color): + # see gh-18392 + css = ( + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: {input_color}" + ) + + expected = {} + + if 
input_color is not None: + expected["fill"] = {"patternType": "solid"} + + with tm.assert_produces_warning(CSSWarning): + convert = CSSToExcelConverter() + assert expected == convert(css) + + +def tests_css_named_colors_valid(): + upper_hexs = set(map(str.upper, string.hexdigits)) + for color in CSSToExcelConverter.NAMED_COLORS.values(): + assert len(color) == 6 and all(c in upper_hexs for c in color) + + +def test_css_named_colors_from_mpl_present(): + mpl_colors = pytest.importorskip("matplotlib.colors") + + pd_colors = CSSToExcelConverter.NAMED_COLORS + for name, color in mpl_colors.CSS4_COLORS.items(): + assert name in pd_colors and pd_colors[name] == color[1:] + + +@pytest.mark.parametrize( + "styles,expected", + [ + ([("color", "green"), ("color", "red")], "color: red;"), + ([("font-weight", "bold"), ("font-weight", "normal")], "font-weight: normal;"), + ([("text-align", "center"), ("TEXT-ALIGN", "right")], "text-align: right;"), + ], +) +def test_css_excel_cell_precedence(styles, expected): + """It applies favors latter declarations over former declarations""" + # See GH 47371 + converter = CSSToExcelConverter() + converter._call_cached.cache_clear() + css_styles = {(0, 0): styles} + cell = CssExcelCell( + row=0, + col=0, + val="", + style=None, + css_styles=css_styles, + css_row=0, + css_col=0, + css_converter=converter, + ) + converter._call_cached.cache_clear() + + assert cell.style == converter(expected) + + +@pytest.mark.parametrize( + "styles,cache_hits,cache_misses", + [ + ([[("color", "green"), ("color", "red"), ("color", "green")]], 0, 1), + ( + [ + [("font-weight", "bold")], + [("font-weight", "normal"), ("font-weight", "bold")], + ], + 1, + 1, + ), + ([[("text-align", "center")], [("TEXT-ALIGN", "center")]], 1, 1), + ( + [ + [("font-weight", "bold"), ("text-align", "center")], + [("font-weight", "bold"), ("text-align", "left")], + ], + 0, + 2, + ), + ( + [ + [("font-weight", "bold"), ("text-align", "center")], + [("font-weight", "bold"), ("text-align", "left")], + [("font-weight", "bold"), ("text-align", "center")], + ], + 1, + 2, + ), + ], +) +def test_css_excel_cell_cache(styles, cache_hits, cache_misses): + """It caches unique cell styles""" + # See GH 47371 + converter = CSSToExcelConverter() + converter._call_cached.cache_clear() + + css_styles = {(0, i): _style for i, _style in enumerate(styles)} + for css_row, css_col in css_styles: + CssExcelCell( + row=0, + col=0, + val="", + style=None, + css_styles=css_styles, + css_row=css_row, + css_col=css_col, + css_converter=converter, + ) + cache_info = converter._call_cached.cache_info() + converter._call_cached.cache_clear() + + assert cache_info.hits == cache_hits + assert cache_info.misses == cache_misses diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py new file mode 100644 index 0000000000000000000000000000000000000000..85eca834ff0d43ca30eb4043ed9f97fd3807899b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py @@ -0,0 +1,106 @@ +from io import ( + BytesIO, + StringIO, +) + +import pytest + +import pandas as pd +import pandas._testing as tm + +pytest.importorskip("tabulate") + + +def test_simple(): + buf = StringIO() + df = pd.DataFrame([1, 2, 3]) + df.to_markdown(buf=buf) + result = buf.getvalue() + assert ( + result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" + ) + + +def test_empty_frame(): + buf = StringIO() + df = 
pd.DataFrame({"id": [], "first_name": [], "last_name": []}).set_index("id") + df.to_markdown(buf=buf) + result = buf.getvalue() + assert result == ( + "| id | first_name | last_name |\n" + "|------|--------------|-------------|" + ) + + +def test_other_tablefmt(): + buf = StringIO() + df = pd.DataFrame([1, 2, 3]) + df.to_markdown(buf=buf, tablefmt="jira") + result = buf.getvalue() + assert result == "|| || 0 ||\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" + + +def test_other_headers(): + buf = StringIO() + df = pd.DataFrame([1, 2, 3]) + df.to_markdown(buf=buf, headers=["foo", "bar"]) + result = buf.getvalue() + assert result == ( + "| foo | bar |\n|------:|------:|\n| 0 " + "| 1 |\n| 1 | 2 |\n| 2 | 3 |" + ) + + +def test_series(): + buf = StringIO() + s = pd.Series([1, 2, 3], name="foo") + s.to_markdown(buf=buf) + result = buf.getvalue() + assert result == ( + "| | foo |\n|---:|------:|\n| 0 | 1 " + "|\n| 1 | 2 |\n| 2 | 3 |" + ) + + +def test_no_buf(): + df = pd.DataFrame([1, 2, 3]) + result = df.to_markdown() + assert ( + result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" + ) + + +@pytest.mark.parametrize("index", [True, False]) +def test_index(index): + # GH 32667 + + df = pd.DataFrame([1, 2, 3]) + + result = df.to_markdown(index=index) + + if index: + expected = ( + "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" + ) + else: + expected = "| 0 |\n|----:|\n| 1 |\n| 2 |\n| 3 |" + assert result == expected + + +def test_showindex_disallowed_in_kwargs(): + # GH 32667; disallowing showindex in kwargs enforced in 2.0 + df = pd.DataFrame([1, 2, 3]) + with pytest.raises(ValueError, match="Pass 'index' instead of 'showindex"): + df.to_markdown(index=True, showindex=True) + + +def test_markdown_pos_args_deprecatation(): + # GH-54229 + df = pd.DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_markdown except for the " + r"argument 'buf' will be keyword-only." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buffer = BytesIO() + df.to_markdown(buffer, "grid") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5a5005cb0761c104ed6de26cd5a6ef730c08d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py @@ -0,0 +1,1216 @@ +from datetime import ( + datetime, + timedelta, +) +from io import StringIO +import re +import sys +from textwrap import dedent + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas import ( + CategoricalIndex, + DataFrame, + Index, + NaT, + Series, + Timestamp, + concat, + date_range, + get_option, + option_context, + read_csv, + timedelta_range, + to_datetime, +) +import pandas._testing as tm + + +def _three_digit_exp(): + return f"{1.7e8:.4g}" == "1.7e+008" + + +class TestDataFrameToStringFormatters: + def test_to_string_masked_ea_with_formatter(self): + # GH#39336 + df = DataFrame( + { + "a": Series([0.123456789, 1.123456789], dtype="Float64"), + "b": Series([1, 2], dtype="Int64"), + } + ) + result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format]) + expected = dedent( + """\ + a b + 0 0.12 1.00 + 1 1.12 2.00""" + ) + assert result == expected + + def test_to_string_with_formatters(self): + df = DataFrame( + { + "int": [1, 2, 3], + "float": [1.0, 2.0, 3.0], + "object": [(1, 2), True, False], + }, + columns=["int", "float", "object"], + ) + + formatters = [ + ("int", lambda x: f"0x{x:x}"), + ("float", lambda x: f"[{x: 4.1f}]"), + ("object", lambda x: f"-{x!s}-"), + ] + result = df.to_string(formatters=dict(formatters)) + result2 = df.to_string(formatters=list(zip(*formatters))[1]) + assert result == ( + " int float object\n" + "0 0x1 [ 1.0] -(1, 2)-\n" + "1 0x2 [ 2.0] -True-\n" + "2 0x3 [ 3.0] -False-" + ) + assert result == result2 + + def test_to_string_with_datetime64_monthformatter(self): + months = [datetime(2016, 1, 1), datetime(2016, 2, 2)] + x = DataFrame({"months": months}) + + def format_func(x): + return x.strftime("%Y-%m") + + result = x.to_string(formatters={"months": format_func}) + expected = dedent( + """\ + months + 0 2016-01 + 1 2016-02""" + ) + assert result.strip() == expected + + def test_to_string_with_datetime64_hourformatter(self): + x = DataFrame( + {"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")} + ) + + def format_func(x): + return x.strftime("%H:%M") + + result = x.to_string(formatters={"hod": format_func}) + expected = dedent( + """\ + hod + 0 10:10 + 1 12:12""" + ) + assert result.strip() == expected + + def test_to_string_with_formatters_unicode(self): + df = DataFrame({"c/\u03c3": [1, 2, 3]}) + result = df.to_string(formatters={"c/\u03c3": str}) + expected = dedent( + """\ + c/\u03c3 + 0 1 + 1 2 + 2 3""" + ) + assert result == expected + + def test_to_string_index_formatter(self): + df = DataFrame([range(5), range(5, 10), range(10, 15)]) + + rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]}) + + xp = dedent( + """\ + 0 1 2 3 4 + a 0 1 2 3 4 + b 5 6 7 8 9 + c 10 11 12 13 14\ + """ + ) + assert rs == xp + + def test_no_extra_space(self): + # GH#52690: Check that no extra space is given + col1 = "TEST" + col2 = "PANDAS" + col3 = "to_string" + expected = f"{col1:<6s} {col2:<7s} {col3:<10s}" + df = DataFrame([{"col1": "TEST", "col2": "PANDAS", "col3": 
"to_string"}]) + d = {"col1": "{:<6s}".format, "col2": "{:<7s}".format, "col3": "{:<10s}".format} + result = df.to_string(index=False, header=False, formatters=d) + assert result == expected + + +class TestDataFrameToStringColSpace: + def test_to_string_with_column_specific_col_space_raises(self): + df = DataFrame( + np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"] + ) + + msg = ( + "Col_space length\\(\\d+\\) should match " + "DataFrame number of columns\\(\\d+\\)" + ) + with pytest.raises(ValueError, match=msg): + df.to_string(col_space=[30, 40]) + + with pytest.raises(ValueError, match=msg): + df.to_string(col_space=[30, 40, 50, 60]) + + msg = "unknown column" + with pytest.raises(ValueError, match=msg): + df.to_string(col_space={"a": "foo", "b": 23, "d": 34}) + + def test_to_string_with_column_specific_col_space(self): + df = DataFrame( + np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"] + ) + + result = df.to_string(col_space={"a": 10, "b": 11, "c": 12}) + # 3 separating space + each col_space for (id, a, b, c) + assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12) + + result = df.to_string(col_space=[10, 11, 12]) + assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12) + + def test_to_string_with_col_space(self): + df = DataFrame(np.random.default_rng(2).random(size=(1, 3))) + c10 = len(df.to_string(col_space=10).split("\n")[1]) + c20 = len(df.to_string(col_space=20).split("\n")[1]) + c30 = len(df.to_string(col_space=30).split("\n")[1]) + assert c10 < c20 < c30 + + # GH#8230 + # col_space wasn't being applied with header=False + with_header = df.to_string(col_space=20) + with_header_row1 = with_header.splitlines()[1] + no_header = df.to_string(col_space=20, header=False) + assert len(with_header_row1) == len(no_header) + + def test_to_string_repr_tuples(self): + buf = StringIO() + + df = DataFrame({"tups": list(zip(range(10), range(10)))}) + repr(df) + df.to_string(col_space=10, buf=buf) + + +class TestDataFrameToStringHeader: + def test_to_string_header_false(self): + # GH#49230 + df = DataFrame([1, 2]) + df.index.name = "a" + s = df.to_string(header=False) + expected = "a \n0 1\n1 2" + assert s == expected + + df = DataFrame([[1, 2], [3, 4]]) + df.index.name = "a" + s = df.to_string(header=False) + expected = "a \n0 1 2\n1 3 4" + assert s == expected + + def test_to_string_multindex_header(self): + # GH#16718 + df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"]) + res = df.to_string(header=["r1", "r2"]) + exp = " r1 r2\na b \n0 1 2 3" + assert res == exp + + def test_to_string_no_header(self): + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(header=False) + expected = "0 1 4\n1 2 5\n2 3 6" + + assert df_s == expected + + def test_to_string_specified_header(self): + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(header=["X", "Y"]) + expected = " X Y\n0 1 4\n1 2 5\n2 3 6" + + assert df_s == expected + + msg = "Writing 2 cols but got 1 aliases" + with pytest.raises(ValueError, match=msg): + df.to_string(header=["X"]) + + +class TestDataFrameToStringLineWidth: + def test_to_string_line_width(self): + df = DataFrame(123, index=range(10, 15), columns=range(30)) + lines = df.to_string(line_width=80) + assert max(len(line) for line in lines.split("\n")) == 80 + + def test_to_string_line_width_no_index(self): + # GH#13998, GH#22505 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, index=False) + expected = " x \\\n 1 \n 2 
\n 3 \n\n y \n 4 \n 5 \n 6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, index=False) + expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) + + df_s = df.to_string(line_width=1, index=False) + expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 " + + assert df_s == expected + + def test_to_string_line_width_no_header(self): + # GH#53054 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, header=False) + expected = "0 1 \\\n1 2 \n2 3 \n\n0 4 \n1 5 \n2 6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, header=False) + expected = "0 11 \\\n1 22 \n2 33 \n\n0 4 \n1 5 \n2 6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) + + df_s = df.to_string(line_width=1, header=False) + expected = "0 11 \\\n1 22 \n2 -33 \n\n0 4 \n1 5 \n2 -6 " + + assert df_s == expected + + def test_to_string_line_width_with_both_index_and_header(self): + # GH#53054 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1) + expected = ( + " x \\\n0 1 \n1 2 \n2 3 \n\n y \n0 4 \n1 5 \n2 6 " + ) + + assert df_s == expected + + df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1) + expected = ( + " x \\\n0 11 \n1 22 \n2 33 \n\n y \n0 4 \n1 5 \n2 6 " + ) + + assert df_s == expected + + df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) + + df_s = df.to_string(line_width=1) + expected = ( + " x \\\n0 11 \n1 22 \n2 -33 \n\n y \n0 4 \n1 5 \n2 -6 " + ) + + assert df_s == expected + + def test_to_string_line_width_no_index_no_header(self): + # GH#53054 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, index=False, header=False) + expected = "1 \\\n2 \n3 \n\n4 \n5 \n6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) + + df_s = df.to_string(line_width=1, index=False, header=False) + expected = "11 \\\n22 \n33 \n\n4 \n5 \n6 " + + assert df_s == expected + + df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) + + df_s = df.to_string(line_width=1, index=False, header=False) + expected = " 11 \\\n 22 \n-33 \n\n 4 \n 5 \n-6 " + + assert df_s == expected + + +class TestToStringNumericFormatting: + def test_to_string_float_format_no_fixed_width(self): + # GH#21625 + df = DataFrame({"x": [0.19999]}) + expected = " x\n0 0.200" + assert df.to_string(float_format="%.3f") == expected + + # GH#22270 + df = DataFrame({"x": [100.0]}) + expected = " x\n0 100" + assert df.to_string(float_format="%.0f") == expected + + def test_to_string_small_float_values(self): + df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]}) + + result = df.to_string() + # sadness per above + if _three_digit_exp(): + expected = ( + " a\n" + "0 1.500000e+000\n" + "1 1.000000e-017\n" + "2 -5.500000e-007" + ) + else: + expected = ( + " a\n" + "0 1.500000e+00\n" + "1 1.000000e-17\n" + "2 -5.500000e-07" + ) + assert result == expected + + # but not all exactly zero + df = df * 0 + result = df.to_string() + expected = " 0\n0 0\n1 0\n2 -0" + # TODO: assert that these match?? 
+ + def test_to_string_complex_float_formatting(self): + # GH #25514, 25745 + with option_context("display.precision", 5): + df = DataFrame( + { + "x": [ + (0.4467846931321966 + 0.0715185102060818j), + (0.2739442392974528 + 0.23515228785438969j), + (0.26974928742135185 + 0.3250604054898979j), + (-1j), + ] + } + ) + result = df.to_string() + expected = ( + " x\n0 0.44678+0.07152j\n" + "1 0.27394+0.23515j\n" + "2 0.26975+0.32506j\n" + "3 -0.00000-1.00000j" + ) + assert result == expected + + def test_to_string_format_inf(self): + # GH#24861 + df = DataFrame( + { + "A": [-np.inf, np.inf, -1, -2.1234, 3, 4], + "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"], + } + ) + result = df.to_string() + + expected = ( + " A B\n" + "0 -inf -inf\n" + "1 inf inf\n" + "2 -1.0000 foo\n" + "3 -2.1234 foooo\n" + "4 3.0000 fooooo\n" + "5 4.0000 bar" + ) + assert result == expected + + df = DataFrame( + { + "A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0], + "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"], + } + ) + result = df.to_string() + + expected = ( + " A B\n" + "0 -inf -inf\n" + "1 inf inf\n" + "2 -1.0 foo\n" + "3 -2.0 foooo\n" + "4 3.0 fooooo\n" + "5 4.0 bar" + ) + assert result == expected + + def test_to_string_int_formatting(self): + df = DataFrame({"x": [-15, 20, 25, -35]}) + assert issubclass(df["x"].dtype.type, np.integer) + + output = df.to_string() + expected = " x\n0 -15\n1 20\n2 25\n3 -35" + assert output == expected + + def test_to_string_float_formatting(self): + with option_context( + "display.precision", + 5, + "display.notebook_repr_html", + False, + ): + df = DataFrame( + {"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]} + ) + + df_s = df.to_string() + + if _three_digit_exp(): + expected = ( + " x\n0 0.00000e+000\n1 2.50000e-001\n" + "2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n" + "5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n" + "8 -1.00000e+006" + ) + else: + expected = ( + " x\n0 0.00000e+00\n1 2.50000e-01\n" + "2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n" + "5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n" + "8 -1.00000e+06" + ) + assert df_s == expected + + df = DataFrame({"x": [3234, 0.253]}) + df_s = df.to_string() + + expected = " x\n0 3234.000\n1 0.253" + assert df_s == expected + + assert get_option("display.precision") == 6 + + df = DataFrame({"x": [1e9, 0.2512]}) + df_s = df.to_string() + + if _three_digit_exp(): + expected = " x\n0 1.000000e+009\n1 2.512000e-001" + else: + expected = " x\n0 1.000000e+09\n1 2.512000e-01" + assert df_s == expected + + +class TestDataFrameToString: + def test_to_string_decimal(self): + # GH#23614 + df = DataFrame({"A": [6.0, 3.1, 2.2]}) + expected = " A\n0 6,0\n1 3,1\n2 2,2" + assert df.to_string(decimal=",") == expected + + def test_to_string_left_justify_cols(self): + df = DataFrame({"x": [3234, 0.253]}) + df_s = df.to_string(justify="left") + expected = " x \n0 3234.000\n1 0.253" + assert df_s == expected + + def test_to_string_format_na(self): + df = DataFrame( + { + "A": [np.nan, -1, -2.1234, 3, 4], + "B": [np.nan, "foo", "foooo", "fooooo", "bar"], + } + ) + result = df.to_string() + + expected = ( + " A B\n" + "0 NaN NaN\n" + "1 -1.0000 foo\n" + "2 -2.1234 foooo\n" + "3 3.0000 fooooo\n" + "4 4.0000 bar" + ) + assert result == expected + + df = DataFrame( + { + "A": [np.nan, -1.0, -2.0, 3.0, 4.0], + "B": [np.nan, "foo", "foooo", "fooooo", "bar"], + } + ) + result = df.to_string() + + expected = ( + " A B\n" + "0 NaN NaN\n" + "1 -1.0 foo\n" + "2 -2.0 foooo\n" + "3 3.0 fooooo\n" + "4 4.0 bar" + ) + 
assert result == expected + + def test_to_string_with_dict_entries(self): + df = DataFrame({"A": [{"a": 1, "b": 2}]}) + + val = df.to_string() + assert "'a': 1" in val + assert "'b': 2" in val + + def test_to_string_with_categorical_columns(self): + # GH#35439 + data = [[4, 2], [3, 2], [4, 3]] + cols = ["aaaaaaaaa", "b"] + df = DataFrame(data, columns=cols) + df_cat_cols = DataFrame(data, columns=CategoricalIndex(cols)) + + assert df.to_string() == df_cat_cols.to_string() + + def test_repr_embedded_ndarray(self): + arr = np.empty(10, dtype=[("err", object)]) + for i in range(len(arr)): + arr["err"][i] = np.random.default_rng(2).standard_normal(i) + + df = DataFrame(arr) + repr(df["err"]) + repr(df) + df.to_string() + + def test_to_string_truncate(self): + # GH 9784 - dont truncate when calling DataFrame.to_string + df = DataFrame( + [ + { + "a": "foo", + "b": "bar", + "c": "let's make this a very VERY long line that is longer " + "than the default 50 character limit", + "d": 1, + }, + {"a": "foo", "b": "bar", "c": "stuff", "d": 1}, + ] + ) + df.set_index(["a", "b", "c"]) + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + with option_context("max_colwidth", 20): + # the display option has no effect on the to_string method + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + assert df.to_string(max_colwidth=20) == ( + " a b c d\n" + "0 foo bar let's make this ... 1\n" + "1 foo bar stuff 1" + ) + + @pytest.mark.parametrize( + "input_array, expected", + [ + ({"A": ["a"]}, "A\na"), + ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"), + ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"), + ], + ) + def test_format_remove_leading_space_dataframe(self, input_array, expected): + # GH#24980 + df = DataFrame(input_array).to_string(index=False) + assert df == expected + + @pytest.mark.parametrize( + "data,expected", + [ + ( + {"col1": [1, 2], "col2": [3, 4]}, + " col1 col2\n0 1 3\n1 2 4", + ), + ( + {"col1": ["Abc", 0.756], "col2": [np.nan, 4.5435]}, + " col1 col2\n0 Abc NaN\n1 0.756 4.5435", + ), + ( + {"col1": [np.nan, "a"], "col2": [0.009, 3.543], "col3": ["Abc", 23]}, + " col1 col2 col3\n0 NaN 0.009 Abc\n1 a 3.543 23", + ), + ], + ) + def test_to_string_max_rows_zero(self, data, expected): + # GH#35394 + result = DataFrame(data=data).to_string(max_rows=0) + assert result == expected + + @pytest.mark.parametrize( + "max_cols, max_rows, expected", + [ + ( + 10, + None, + " 0 1 2 3 4 ... 6 7 8 9 10\n" + " 0 0 0 0 0 ... 0 0 0 0 0\n" + " 0 0 0 0 0 ... 0 0 0 0 0\n" + " 0 0 0 0 0 ... 0 0 0 0 0\n" + " 0 0 0 0 0 ... 0 0 0 0 0", + ), + ( + None, + 2, + " 0 1 2 3 4 5 6 7 8 9 10\n" + " 0 0 0 0 0 0 0 0 0 0 0\n" + " .. .. .. .. .. .. .. .. .. .. ..\n" + " 0 0 0 0 0 0 0 0 0 0 0", + ), + ( + 10, + 2, + " 0 1 2 3 4 ... 6 7 8 9 10\n" + " 0 0 0 0 0 ... 0 0 0 0 0\n" + " .. .. .. .. .. ... .. .. .. .. ..\n" + " 0 0 0 0 0 ... 0 0 0 0 0", + ), + ( + 9, + 2, + " 0 1 2 3 ... 7 8 9 10\n" + " 0 0 0 0 ... 0 0 0 0\n" + " .. .. .. .. ... .. .. .. ..\n" + " 0 0 0 0 ... 0 0 0 0", + ), + ( + 1, + 1, + " 0 ...\n 0 ...\n.. 
...", + ), + ], + ) + def test_truncation_no_index(self, max_cols, max_rows, expected): + df = DataFrame([[0] * 11] * 4) + assert ( + df.to_string(index=False, max_cols=max_cols, max_rows=max_rows) == expected + ) + + def test_to_string_no_index(self): + # GH#16839, GH#13032 + df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]}) + + df_s = df.to_string(index=False) + # Leading space is expected for positive numbers. + expected = " x y z\n11 33 AAA\n22 -44 " + assert df_s == expected + + df_s = df[["y", "x", "z"]].to_string(index=False) + expected = " y x z\n 33 11 AAA\n-44 22 " + assert df_s == expected + + def test_to_string_unicode_columns(self, float_frame): + df = DataFrame({"\u03c3": np.arange(10.0)}) + + buf = StringIO() + df.to_string(buf=buf) + buf.getvalue() + + buf = StringIO() + df.info(buf=buf) + buf.getvalue() + + result = float_frame.to_string() + assert isinstance(result, str) + + @pytest.mark.parametrize("na_rep", ["NaN", "Ted"]) + def test_to_string_na_rep_and_float_format(self, na_rep): + # GH#13828 + df = DataFrame([["A", 1.2225], ["A", None]], columns=["Group", "Data"]) + result = df.to_string(na_rep=na_rep, float_format="{:.2f}".format) + expected = dedent( + f"""\ + Group Data + 0 A 1.22 + 1 A {na_rep}""" + ) + assert result == expected + + def test_to_string_string_dtype(self): + # GH#50099 + pytest.importorskip("pyarrow") + df = DataFrame( + {"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]} + ) + df = df.astype( + {"x": "string[pyarrow]", "y": "string[python]", "z": "int64[pyarrow]"} + ) + result = df.dtypes.to_string() + expected = dedent( + """\ + x string[pyarrow] + y string[python] + z int64[pyarrow]""" + ) + assert result == expected + + def test_to_string_pos_args_deprecation(self): + # GH#54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + "Starting with pandas version 3.0 all arguments of to_string " + "except for the " + "argument 'buf' will be keyword-only." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buf = StringIO() + df.to_string(buf, None, None, True, True) + + def test_to_string_utf8_columns(self): + n = "\u05d0".encode() + df = DataFrame([1, 2], columns=[n]) + + with option_context("display.max_rows", 1): + repr(df) + + def test_to_string_unicode_two(self): + dm = DataFrame({"c/\u03c3": []}) + buf = StringIO() + dm.to_string(buf) + + def test_to_string_unicode_three(self): + dm = DataFrame(["\xc2"]) + buf = StringIO() + dm.to_string(buf) + + def test_to_string_with_float_index(self): + index = Index([1.5, 2, 3, 4, 5]) + df = DataFrame(np.arange(5), index=index) + + result = df.to_string() + expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4" + assert result == expected + + def test_to_string(self): + # big mixed + biggie = DataFrame( + { + "A": np.random.default_rng(2).standard_normal(200), + "B": Index([f"{i}?!" 
for i in range(200)]), + }, + ) + + biggie.loc[:20, "A"] = np.nan + biggie.loc[:20, "B"] = np.nan + s = biggie.to_string() + + buf = StringIO() + retval = biggie.to_string(buf=buf) + assert retval is None + assert buf.getvalue() == s + + assert isinstance(s, str) + + # print in right order + result = biggie.to_string( + columns=["B", "A"], col_space=17, float_format="%.5f".__mod__ + ) + lines = result.split("\n") + header = lines[0].strip().split() + joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]]) + recons = read_csv(StringIO(joined), names=header, header=None, sep=" ") + tm.assert_series_equal(recons["B"], biggie["B"]) + assert recons["A"].count() == biggie["A"].count() + assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all() + + # FIXME: don't leave commented-out + # expected = ['B', 'A'] + # assert header == expected + + result = biggie.to_string(columns=["A"], col_space=17) + header = result.split("\n")[0].strip().split() + expected = ["A"] + assert header == expected + + biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"}) + + biggie.to_string(columns=["B", "A"], float_format=str) + biggie.to_string(columns=["B", "A"], col_space=12, float_format=str) + + frame = DataFrame(index=np.arange(200)) + frame.to_string() + + # TODO: split or simplify this test? + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="fix when arrow is default") + def test_to_string_index_with_nan(self): + # GH#2850 + df = DataFrame( + { + "id1": {0: "1a3", 1: "9h4"}, + "id2": {0: np.nan, 1: "d67"}, + "id3": {0: "78d", 1: "79d"}, + "value": {0: 123, 1: 64}, + } + ) + + # multi-index + y = df.set_index(["id1", "id2", "id3"]) + result = y.to_string() + expected = ( + " value\nid1 id2 id3 \n" + "1a3 NaN 78d 123\n9h4 d67 79d 64" + ) + assert result == expected + + # index + y = df.set_index("id2") + result = y.to_string() + expected = ( + " id1 id3 value\nid2 \n" + "NaN 1a3 78d 123\nd67 9h4 79d 64" + ) + assert result == expected + + # with append (this failed in 0.12) + y = df.set_index(["id1", "id2"]).set_index("id3", append=True) + result = y.to_string() + expected = ( + " value\nid1 id2 id3 \n" + "1a3 NaN 78d 123\n9h4 d67 79d 64" + ) + assert result == expected + + # all-nan in mi + df2 = df.copy() + df2.loc[:, "id2"] = np.nan + y = df2.set_index("id2") + result = y.to_string() + expected = ( + " id1 id3 value\nid2 \n" + "NaN 1a3 78d 123\nNaN 9h4 79d 64" + ) + assert result == expected + + # partial nan in mi + df2 = df.copy() + df2.loc[:, "id2"] = np.nan + y = df2.set_index(["id2", "id3"]) + result = y.to_string() + expected = ( + " id1 value\nid2 id3 \n" + "NaN 78d 1a3 123\n 79d 9h4 64" + ) + assert result == expected + + df = DataFrame( + { + "id1": {0: np.nan, 1: "9h4"}, + "id2": {0: np.nan, 1: "d67"}, + "id3": {0: np.nan, 1: "79d"}, + "value": {0: 123, 1: 64}, + } + ) + + y = df.set_index(["id1", "id2", "id3"]) + result = y.to_string() + expected = ( + " value\nid1 id2 id3 \n" + "NaN NaN NaN 123\n9h4 d67 79d 64" + ) + assert result == expected + + def test_to_string_nonunicode_nonascii_alignment(self): + df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]]) + rep_str = df.to_string() + lines = rep_str.split("\n") + assert len(lines[1]) == len(lines[2]) + + def test_unicode_problem_decoding_as_ascii(self): + df = DataFrame({"c/\u03c3": Series({"test": np.nan})}) + str(df.to_string()) + + def test_to_string_repr_unicode(self): + buf = StringIO() + + unicode_values = ["\u03c3"] * 10 + unicode_values = np.array(unicode_values, 
dtype=object) + df = DataFrame({"unicode": unicode_values}) + df.to_string(col_space=10, buf=buf) + + # it works! + repr(df) + # it works even if sys.stdin in None + _stdin = sys.stdin + try: + sys.stdin = None + repr(df) + finally: + sys.stdin = _stdin + + +class TestSeriesToString: + def test_to_string_without_index(self): + # GH#11729 Test index=False option + ser = Series([1, 2, 3, 4]) + result = ser.to_string(index=False) + expected = "\n".join(["1", "2", "3", "4"]) + assert result == expected + + def test_to_string_name(self): + ser = Series(range(100), dtype="int64") + ser.name = "myser" + res = ser.to_string(max_rows=2, name=True) + exp = "0 0\n ..\n99 99\nName: myser" + assert res == exp + res = ser.to_string(max_rows=2, name=False) + exp = "0 0\n ..\n99 99" + assert res == exp + + def test_to_string_dtype(self): + ser = Series(range(100), dtype="int64") + res = ser.to_string(max_rows=2, dtype=True) + exp = "0 0\n ..\n99 99\ndtype: int64" + assert res == exp + res = ser.to_string(max_rows=2, dtype=False) + exp = "0 0\n ..\n99 99" + assert res == exp + + def test_to_string_length(self): + ser = Series(range(100), dtype="int64") + res = ser.to_string(max_rows=2, length=True) + exp = "0 0\n ..\n99 99\nLength: 100" + assert res == exp + + def test_to_string_na_rep(self): + ser = Series(index=range(100), dtype=np.float64) + res = ser.to_string(na_rep="foo", max_rows=2) + exp = "0 foo\n ..\n99 foo" + assert res == exp + + def test_to_string_float_format(self): + ser = Series(range(10), dtype="float64") + res = ser.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2) + exp = "0 0.0\n ..\n9 9.0" + assert res == exp + + def test_to_string_header(self): + ser = Series(range(10), dtype="int64") + ser.index.name = "foo" + res = ser.to_string(header=True, max_rows=2) + exp = "foo\n0 0\n ..\n9 9" + assert res == exp + res = ser.to_string(header=False, max_rows=2) + exp = "0 0\n ..\n9 9" + assert res == exp + + def test_to_string_empty_col(self): + # GH#13653 + ser = Series(["", "Hello", "World", "", "", "Mooooo", "", ""]) + res = ser.to_string(index=False) + exp = " \n Hello\n World\n \n \nMooooo\n \n " + assert re.match(exp, res) + + def test_to_string_timedelta64(self): + Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string() + + ser = Series(date_range("2012-1-1", periods=3, freq="D")) + + # GH#2146 + + # adding NaTs + y = ser - ser.shift(1) + result = y.to_string() + assert "1 days" in result + assert "00:00:00" not in result + assert "NaT" in result + + # with frac seconds + o = Series([datetime(2012, 1, 1, microsecond=150)] * 3) + y = ser - o + result = y.to_string() + assert "-1 days +23:59:59.999850" in result + + # rounding? 
+ o = Series([datetime(2012, 1, 1, 1)] * 3) + y = ser - o + result = y.to_string() + assert "-1 days +23:00:00" in result + assert "1 days 23:00:00" in result + + o = Series([datetime(2012, 1, 1, 1, 1)] * 3) + y = ser - o + result = y.to_string() + assert "-1 days +22:59:00" in result + assert "1 days 22:59:00" in result + + o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3) + y = ser - o + result = y.to_string() + assert "-1 days +22:58:59.999850" in result + assert "0 days 22:58:59.999850" in result + + # neg time + td = timedelta(minutes=5, seconds=3) + s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td + y = ser - s2 + result = y.to_string() + assert "-1 days +23:54:57" in result + + td = timedelta(microseconds=550) + s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td + y = ser - td + result = y.to_string() + assert "2012-01-01 23:59:59.999450" in result + + # no boxing of the actual elements + td = Series(timedelta_range("1 days", periods=3)) + result = td.to_string() + assert result == "0 1 days\n1 2 days\n2 3 days" + + def test_to_string(self): + ts = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10, freq="B"), + ) + buf = StringIO() + + s = ts.to_string() + + retval = ts.to_string(buf=buf) + assert retval is None + assert buf.getvalue().strip() == s + + # pass float_format + format = "%.4f".__mod__ + result = ts.to_string(float_format=format) + result = [x.split()[1] for x in result.split("\n")[:-1]] + expected = [format(x) for x in ts] + assert result == expected + + # empty string + result = ts[:0].to_string() + assert result == "Series([], Freq: B)" + + result = ts[:0].to_string(length=0) + assert result == "Series([], Freq: B)" + + # name and length + cp = ts.copy() + cp.name = "foo" + result = cp.to_string(length=True, name=True, dtype=True) + last_line = result.split("\n")[-1].strip() + assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64") + + @pytest.mark.parametrize( + "input_array, expected", + [ + ("a", "a"), + (["a", "b"], "a\nb"), + ([1, "a"], "1\na"), + (1, "1"), + ([0, -1], " 0\n-1"), + (1.0, "1.0"), + ([" a", " b"], " a\n b"), + ([".1", "1"], ".1\n 1"), + (["10", "-10"], " 10\n-10"), + ], + ) + def test_format_remove_leading_space_series(self, input_array, expected): + # GH: 24980 + ser = Series(input_array) + result = ser.to_string(index=False) + assert result == expected + + def test_to_string_complex_number_trims_zeros(self): + ser = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j]) + result = ser.to_string() + expected = dedent( + """\ + 0 1.00+1.00j + 1 1.00+1.00j + 2 1.05+1.00j""" + ) + assert result == expected + + def test_nullable_float_to_string(self, float_ea_dtype): + # https://github.com/pandas-dev/pandas/issues/36775 + dtype = float_ea_dtype + ser = Series([0.0, 1.0, None], dtype=dtype) + result = ser.to_string() + expected = dedent( + """\ + 0 0.0 + 1 1.0 + 2 """ + ) + assert result == expected + + def test_nullable_int_to_string(self, any_int_ea_dtype): + # https://github.com/pandas-dev/pandas/issues/36775 + dtype = any_int_ea_dtype + ser = Series([0, 1, None], dtype=dtype) + result = ser.to_string() + expected = dedent( + """\ + 0 0 + 1 1 + 2 """ + ) + assert result == expected + + def test_to_string_mixed(self): + ser = Series(["foo", np.nan, -1.23, 4.56]) + result = ser.to_string() + expected = "".join(["0 foo\n", "1 NaN\n", "2 -1.23\n", "3 4.56"]) + assert result == expected + + # but don't count NAs as floats + ser = Series(["foo", np.nan, "bar", 
"baz"]) + result = ser.to_string() + expected = "".join(["0 foo\n", "1 NaN\n", "2 bar\n", "3 baz"]) + assert result == expected + + ser = Series(["foo", 5, "bar", "baz"]) + result = ser.to_string() + expected = "".join(["0 foo\n", "1 5\n", "2 bar\n", "3 baz"]) + assert result == expected + + def test_to_string_float_na_spacing(self): + ser = Series([0.0, 1.5678, 2.0, -3.0, 4.0]) + ser[::2] = np.nan + + result = ser.to_string() + expected = ( + "0 NaN\n" + "1 1.5678\n" + "2 NaN\n" + "3 -3.0000\n" + "4 NaN" + ) + assert result == expected + + def test_to_string_with_datetimeindex(self): + index = date_range("20130102", periods=6) + ser = Series(1, index=index) + result = ser.to_string() + assert "2013-01-02" in result + + # nat in index + s2 = Series(2, index=[Timestamp("20130111"), NaT]) + ser = concat([s2, ser]) + result = ser.to_string() + assert "NaT" in result + + # nat in summary + result = str(s2.index) + assert "NaT" in result diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py b/venv/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py new file mode 100644 index 0000000000000000000000000000000000000000..9bfd8eb9d51d59ef83c7a4d6fcf8bbeb1ef24025 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py @@ -0,0 +1,342 @@ +""" +self-contained to write legacy storage pickle files + +To use this script. Create an environment where you want +generate pickles, say its for 0.20.3, with your pandas clone +in ~/pandas + +. activate pandas_0.20.3 +cd ~/pandas/pandas + +$ python -m tests.io.generate_legacy_storage_files \ + tests/io/data/legacy_pickle/0.20.3/ pickle + +This script generates a storage file for the current arch, system, +and python version + pandas version: 0.20.3 + output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ + storage format: pickle +created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle + +The idea here is you are using the *current* version of the +generate_legacy_storage_files with an *older* version of pandas to +generate a pickle file. We will then check this file into a current +branch, and test using test_pickle.py. This will load the *older* +pickles and test versus the current data that is generated +(with main). These are then compared. + +If we have cases where we changed the signature (e.g. we renamed +offset -> freq in Timestamp). Then we have to conditionally execute +in the generate_legacy_storage_files.py to make it +run under the older AND the newer version. 
+ +""" + +from datetime import timedelta +import os +import pickle +import platform as pl +import sys + +# Remove script directory from path, otherwise Python will try to +# import the JSON test directory as the json module +sys.path.pop(0) + +import numpy as np + +import pandas +from pandas import ( + Categorical, + DataFrame, + Index, + MultiIndex, + NaT, + Period, + RangeIndex, + Series, + Timestamp, + bdate_range, + date_range, + interval_range, + period_range, + timedelta_range, +) +from pandas.arrays import SparseArray + +from pandas.tseries.offsets import ( + FY5253, + BusinessDay, + BusinessHour, + CustomBusinessDay, + DateOffset, + Day, + Easter, + Hour, + LastWeekOfMonth, + Minute, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + SemiMonthBegin, + SemiMonthEnd, + Week, + WeekOfMonth, + YearBegin, + YearEnd, +) + + +def _create_sp_series(): + nan = np.nan + + # nan-based + arr = np.arange(15, dtype=np.float64) + arr[7:12] = nan + arr[-1:] = nan + + bseries = Series(SparseArray(arr, kind="block")) + bseries.name = "bseries" + return bseries + + +def _create_sp_tsseries(): + nan = np.nan + + # nan-based + arr = np.arange(15, dtype=np.float64) + arr[7:12] = nan + arr[-1:] = nan + + date_index = bdate_range("1/1/2011", periods=len(arr)) + bseries = Series(SparseArray(arr, kind="block"), index=date_index) + bseries.name = "btsseries" + return bseries + + +def _create_sp_frame(): + nan = np.nan + + data = { + "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], + "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], + "C": np.arange(10).astype(np.int64), + "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan], + } + + dates = bdate_range("1/1/2011", periods=10) + return DataFrame(data, index=dates).apply(SparseArray) + + +def create_pickle_data(): + """create the pickle data""" + data = { + "A": [0.0, 1.0, 2.0, 3.0, np.nan], + "B": [0, 1, 0, 1, 0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": date_range("1/1/2009", periods=5), + "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0], + } + + scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")} + + index = { + "int": Index(np.arange(10)), + "date": date_range("20130101", periods=10), + "period": period_range("2013-01-01", freq="M", periods=10), + "float": Index(np.arange(10, dtype=np.float64)), + "uint": Index(np.arange(10, dtype=np.uint64)), + "timedelta": timedelta_range("00:00:00", freq="30min", periods=10), + } + + index["range"] = RangeIndex(10) + + index["interval"] = interval_range(0, periods=10) + + mi = { + "reg2": MultiIndex.from_tuples( + tuple( + zip( + *[ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + ) + ), + names=["first", "second"], + ) + } + + series = { + "float": Series(data["A"]), + "int": Series(data["B"]), + "mixed": Series(data["E"]), + "ts": Series( + np.arange(10).astype(np.int64), index=date_range("20130101", periods=10) + ), + "mi": Series( + np.arange(5).astype(np.float64), + index=MultiIndex.from_tuples( + tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"] + ), + ), + "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]), + "cat": Series(Categorical(["foo", "bar", "baz"])), + "dt": Series(date_range("20130101", periods=5)), + "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")), + "period": Series([Period("2000Q1")] * 5), + } + + mixed_dup_df = DataFrame(data) + mixed_dup_df.columns = list("ABCDA") + frame = { + "float": DataFrame({"A": series["float"], "B": 
series["float"] + 1}), + "int": DataFrame({"A": series["int"], "B": series["int"] + 1}), + "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}), + "mi": DataFrame( + {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)}, + index=MultiIndex.from_tuples( + tuple( + zip( + *[ + ["bar", "bar", "baz", "baz", "baz"], + ["one", "two", "one", "two", "three"], + ] + ) + ), + names=["first", "second"], + ), + ), + "dup": DataFrame( + np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"] + ), + "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}), + "cat_and_float": DataFrame( + { + "A": Categorical(["foo", "bar", "baz"]), + "B": np.arange(3).astype(np.int64), + } + ), + "mixed_dup": mixed_dup_df, + "dt_mixed_tzs": DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + }, + index=range(5), + ), + "dt_mixed2_tzs": DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + "C": Timestamp("20130603", tz="UTC"), + }, + index=range(5), + ), + } + + cat = { + "int8": Categorical(list("abcdefg")), + "int16": Categorical(np.arange(1000)), + "int32": Categorical(np.arange(10000)), + } + + timestamp = { + "normal": Timestamp("2011-01-01"), + "nat": NaT, + "tz": Timestamp("2011-01-01", tz="US/Eastern"), + } + + off = { + "DateOffset": DateOffset(years=1), + "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824), + "BusinessDay": BusinessDay(offset=timedelta(seconds=9)), + "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"), + "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"), + "SemiMonthBegin": SemiMonthBegin(day_of_month=9), + "SemiMonthEnd": SemiMonthEnd(day_of_month=24), + "MonthBegin": MonthBegin(1), + "MonthEnd": MonthEnd(1), + "QuarterBegin": QuarterBegin(1), + "QuarterEnd": QuarterEnd(1), + "Day": Day(1), + "YearBegin": YearBegin(1), + "YearEnd": YearEnd(1), + "Week": Week(1), + "Week_Tues": Week(2, normalize=False, weekday=1), + "WeekOfMonth": WeekOfMonth(week=3, weekday=4), + "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3), + "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"), + "Easter": Easter(), + "Hour": Hour(1), + "Minute": Minute(1), + } + + return { + "series": series, + "frame": frame, + "index": index, + "scalars": scalars, + "mi": mi, + "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()}, + "sp_frame": {"float": _create_sp_frame()}, + "cat": cat, + "timestamp": timestamp, + "offsets": off, + } + + +def platform_name(): + return "_".join( + [ + str(pandas.__version__), + str(pl.machine()), + str(pl.system().lower()), + str(pl.python_version()), + ] + ) + + +def write_legacy_pickles(output_dir): + version = pandas.__version__ + + print( + "This script generates a storage file for the current arch, system, " + "and python version" + ) + print(f" pandas version: {version}") + print(f" output dir : {output_dir}") + print(" storage format: pickle") + + pth = f"{platform_name()}.pickle" + + with open(os.path.join(output_dir, pth), "wb") as fh: + pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL) + + print(f"created pickle file: {pth}") + + +def write_legacy_file(): + # force our cwd to be the first searched + sys.path.insert(0, "") + + if not 3 <= len(sys.argv) <= 4: + sys.exit( + "Specify output directory and storage type: generate_legacy_" + "storage_files.py " + ) + + output_dir = str(sys.argv[1]) + storage_type = str(sys.argv[2]) + + if not os.path.exists(output_dir): + 
os.mkdir(output_dir) + + if storage_type == "pickle": + write_legacy_pickles(output_dir=output_dir) + else: + sys.exit("storage_type must be one of {'pickle'}") + + +if __name__ == "__main__": + write_legacy_file() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0208fcc74ec83f782e1fedf5e89b40fca3ed69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py @@ -0,0 +1,423 @@ +from textwrap import dedent + +import numpy as np +import pytest + +from pandas.errors import ( + PyperclipException, + PyperclipWindowsException, +) + +import pandas as pd +from pandas import ( + NA, + DataFrame, + Series, + get_option, + read_clipboard, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.clipboard import ( + CheckedCall, + _stringifyText, + init_qt_clipboard, +) + + +def build_kwargs(sep, excel): + kwargs = {} + if excel != "default": + kwargs["excel"] = excel + if sep != "default": + kwargs["sep"] = sep + return kwargs + + +@pytest.fixture( + params=[ + "delims", + "utf8", + "utf16", + "string", + "long", + "nonascii", + "colwidth", + "mixed", + "float", + "int", + ] +) +def df(request): + data_type = request.param + + if data_type == "delims": + return DataFrame({"a": ['"a,\t"b|c', "d\tef`"], "b": ["hi'j", "k''lm"]}) + elif data_type == "utf8": + return DataFrame({"a": ["µasd", "Ωœ∑`"], "b": ["øπ∆˚¬", "œ∑`®"]}) + elif data_type == "utf16": + return DataFrame( + {"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]} + ) + elif data_type == "string": + return DataFrame( + np.array([f"i-{i}" for i in range(15)]).reshape(5, 3), columns=list("abc") + ) + elif data_type == "long": + max_rows = get_option("display.max_rows") + return DataFrame( + np.random.default_rng(2).integers(0, 10, size=(max_rows + 1, 3)), + columns=list("abc"), + ) + elif data_type == "nonascii": + return DataFrame({"en": "in English".split(), "es": "en español".split()}) + elif data_type == "colwidth": + _cw = get_option("display.max_colwidth") + 1 + return DataFrame( + np.array(["x" * _cw for _ in range(15)]).reshape(5, 3), columns=list("abc") + ) + elif data_type == "mixed": + return DataFrame( + { + "a": np.arange(1.0, 6.0) + 0.01, + "b": np.arange(1, 6).astype(np.int64), + "c": list("abcde"), + } + ) + elif data_type == "float": + return DataFrame(np.random.default_rng(2).random((5, 3)), columns=list("abc")) + elif data_type == "int": + return DataFrame( + np.random.default_rng(2).integers(0, 10, (5, 3)), columns=list("abc") + ) + else: + raise ValueError + + +@pytest.fixture +def mock_ctypes(monkeypatch): + """ + Mocks WinError to help with testing the clipboard. + """ + + def _mock_win_error(): + return "Window Error" + + # Set raising to False because WinError won't exist on non-windows platforms + with monkeypatch.context() as m: + m.setattr("ctypes.WinError", _mock_win_error, raising=False) + yield + + +@pytest.mark.usefixtures("mock_ctypes") +def test_checked_call_with_bad_call(monkeypatch): + """ + Give CheckCall a function that returns a falsey value and + mock get_errno so it returns false so an exception is raised. 
+ """ + + def _return_false(): + return False + + monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: True) + msg = f"Error calling {_return_false.__name__} \\(Window Error\\)" + + with pytest.raises(PyperclipWindowsException, match=msg): + CheckedCall(_return_false)() + + +@pytest.mark.usefixtures("mock_ctypes") +def test_checked_call_with_valid_call(monkeypatch): + """ + Give CheckCall a function that returns a truthy value and + mock get_errno so it returns true so an exception is not raised. + The function should return the results from _return_true. + """ + + def _return_true(): + return True + + monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: False) + + # Give CheckedCall a callable that returns a truthy value s + checked_call = CheckedCall(_return_true) + assert checked_call() is True + + +@pytest.mark.parametrize( + "text", + [ + "String_test", + True, + 1, + 1.0, + 1j, + ], +) +def test_stringify_text(text): + valid_types = (str, int, float, bool) + + if isinstance(text, valid_types): + result = _stringifyText(text) + assert result == str(text) + else: + msg = ( + "only str, int, float, and bool values " + f"can be copied to the clipboard, not {type(text).__name__}" + ) + with pytest.raises(PyperclipException, match=msg): + _stringifyText(text) + + +@pytest.fixture +def set_pyqt_clipboard(monkeypatch): + qt_cut, qt_paste = init_qt_clipboard() + with monkeypatch.context() as m: + m.setattr(pd.io.clipboard, "clipboard_set", qt_cut) + m.setattr(pd.io.clipboard, "clipboard_get", qt_paste) + yield + + +@pytest.fixture +def clipboard(qapp): + clip = qapp.clipboard() + yield clip + clip.clear() + + +@pytest.mark.single_cpu +@pytest.mark.clipboard +@pytest.mark.usefixtures("set_pyqt_clipboard") +@pytest.mark.usefixtures("clipboard") +class TestClipboard: + # Test that default arguments copy as tab delimited + # Test that explicit delimiters are respected + @pytest.mark.parametrize("sep", [None, "\t", ",", "|"]) + @pytest.mark.parametrize("encoding", [None, "UTF-8", "utf-8", "utf8"]) + def test_round_trip_frame_sep(self, df, sep, encoding): + df.to_clipboard(excel=None, sep=sep, encoding=encoding) + result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding) + tm.assert_frame_equal(df, result) + + # Test white space separator + def test_round_trip_frame_string(self, df): + df.to_clipboard(excel=False, sep=None) + result = read_clipboard() + assert df.to_string() == result.to_string() + assert df.shape == result.shape + + # Two character separator is not supported in to_clipboard + # Test that multi-character separators are not silently passed + def test_excel_sep_warning(self, df): + with tm.assert_produces_warning( + UserWarning, + match="to_clipboard in excel mode requires a single character separator.", + check_stacklevel=False, + ): + df.to_clipboard(excel=True, sep=r"\t") + + # Separator is ignored when excel=False and should produce a warning + def test_copy_delim_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=False, sep="\t") + + # Tests that the default behavior of to_clipboard is tab + # delimited and excel="True" + @pytest.mark.parametrize("sep", ["\t", None, "default"]) + @pytest.mark.parametrize("excel", [True, None, "default"]) + def test_clipboard_copy_tabs_default(self, sep, excel, df, clipboard): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + assert clipboard.text() == df.to_csv(sep="\t") + + # Tests reading of white space separated tables + @pytest.mark.parametrize("sep", [None, 
"default"]) + def test_clipboard_copy_strings(self, sep, df): + kwargs = build_kwargs(sep, False) + df.to_clipboard(**kwargs) + result = read_clipboard(sep=r"\s+") + assert result.to_string() == df.to_string() + assert df.shape == result.shape + + def test_read_clipboard_infer_excel(self, clipboard): + # gh-19010: avoid warnings + clip_kwargs = {"engine": "python"} + + text = dedent( + """ + John James\tCharlie Mingus + 1\t2 + 4\tHarry Carney + """.strip() + ) + clipboard.setText(text) + df = read_clipboard(**clip_kwargs) + + # excel data is parsed correctly + assert df.iloc[1, 1] == "Harry Carney" + + # having diff tab counts doesn't trigger it + text = dedent( + """ + a\t b + 1 2 + 3 4 + """.strip() + ) + clipboard.setText(text) + res = read_clipboard(**clip_kwargs) + + text = dedent( + """ + a b + 1 2 + 3 4 + """.strip() + ) + clipboard.setText(text) + exp = read_clipboard(**clip_kwargs) + + tm.assert_frame_equal(res, exp) + + def test_infer_excel_with_nulls(self, clipboard): + # GH41108 + text = "col1\tcol2\n1\tred\n\tblue\n2\tgreen" + + clipboard.setText(text) + df = read_clipboard() + df_expected = DataFrame( + data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]} + ) + + # excel data is parsed correctly + tm.assert_frame_equal(df, df_expected) + + @pytest.mark.parametrize( + "multiindex", + [ + ( # Can't use `dedent` here as it will remove the leading `\t` + "\n".join( + [ + "\t\t\tcol1\tcol2", + "A\t0\tTrue\t1\tred", + "A\t1\tTrue\t\tblue", + "B\t0\tFalse\t2\tgreen", + ] + ), + [["A", "A", "B"], [0, 1, 0], [True, True, False]], + ), + ( + "\n".join( + ["\t\tcol1\tcol2", "A\t0\t1\tred", "A\t1\t\tblue", "B\t0\t2\tgreen"] + ), + [["A", "A", "B"], [0, 1, 0]], + ), + ], + ) + def test_infer_excel_with_multiindex(self, clipboard, multiindex): + # GH41108 + + clipboard.setText(multiindex[0]) + df = read_clipboard() + df_expected = DataFrame( + data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]}, + index=multiindex[1], + ) + + # excel data is parsed correctly + tm.assert_frame_equal(df, df_expected) + + def test_invalid_encoding(self, df): + msg = "clipboard only supports utf-8 encoding" + # test case for testing invalid encoding + with pytest.raises(ValueError, match=msg): + df.to_clipboard(encoding="ascii") + with pytest.raises(NotImplementedError, match=msg): + read_clipboard(encoding="ascii") + + @pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑`...", "abcd..."]) + def test_raw_roundtrip(self, data): + # PR #25040 wide unicode wasn't copied correctly on PY3 on windows + df = DataFrame({"data": [data]}) + df.to_clipboard() + result = read_clipboard() + tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_read_clipboard_dtype_backend( + self, clipboard, string_storage, dtype_backend, engine + ): + # GH#50502 + if string_storage == "pyarrow" or dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + if string_storage == "python": + string_array = StringArray(np.array(["x", "y"], dtype=np.object_)) + string_array_na = StringArray(np.array(["x", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow" and engine != "c": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["x", "y"])) + string_array_na = ArrowExtensionArray(pa.array(["x", None])) + + else: + string_array = ArrowStringArray(pa.array(["x", "y"])) + string_array_na = ArrowStringArray(pa.array(["x", None])) + + text = """a,b,c,d,e,f,g,h,i 
+x,1,4.0,x,2,4.0,,True,False +y,2,5.0,,,,,False,""" + clipboard.setText(text) + + with pd.option_context("mode.string_storage", string_storage): + result = read_clipboard(sep=",", dtype_backend=dtype_backend, engine=engine) + + expected = DataFrame( + { + "a": string_array, + "b": Series([1, 2], dtype="Int64"), + "c": Series([4.0, 5.0], dtype="Float64"), + "d": string_array_na, + "e": Series([2, NA], dtype="Int64"), + "f": Series([4.0, NA], dtype="Float64"), + "g": Series([NA, NA], dtype="Int64"), + "h": Series([True, False], dtype="boolean"), + "i": Series([False, NA], dtype="boolean"), + } + ) + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + expected["g"] = ArrowExtensionArray(pa.array([None, None])) + + tm.assert_frame_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_clipboard(dtype_backend="numpy") + + def test_to_clipboard_pos_args_deprecation(self): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_clipboard " + r"will be keyword-only." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_clipboard(True, None) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_common.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..074033868635abf8d62702e4d73f64d4fb742222 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_common.py @@ -0,0 +1,650 @@ +""" +Tests for the pandas.io.common functionalities +""" +import codecs +import errno +from functools import partial +from io import ( + BytesIO, + StringIO, + UnsupportedOperation, +) +import mmap +import os +from pathlib import Path +import pickle +import tempfile + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + +import pandas.io.common as icom + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +class CustomFSPath: + """For testing fspath on unknown objects""" + + def __init__(self, path) -> None: + self.path = path + + def __fspath__(self): + return self.path + + +# Functions that consume a string path and return a string or path-like object +path_types = [str, CustomFSPath, Path] + +try: + from py.path import local as LocalPath + + path_types.append(LocalPath) +except ImportError: + pass + +HERE = os.path.abspath(os.path.dirname(__file__)) + + +# https://github.com/cython/cython/issues/1720 +class TestCommonIOCapabilities: + data1 = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + + def test_expand_user(self): + filename = "~/sometest" + expanded_name = icom._expand_user(filename) + + assert expanded_name != filename + assert os.path.isabs(expanded_name) + assert os.path.expanduser(filename) == expanded_name + + def test_expand_user_normal_path(self): + filename = "/somefolder/sometest" + expanded_name = icom._expand_user(filename) + + assert expanded_name == filename + assert os.path.expanduser(filename) == expanded_name + + def 
test_stringify_path_pathlib(self): + rel_path = icom.stringify_path(Path(".")) + assert rel_path == "." + redundant_path = icom.stringify_path(Path("foo//bar")) + assert redundant_path == os.path.join("foo", "bar") + + @td.skip_if_no("py.path") + def test_stringify_path_localpath(self): + path = os.path.join("foo", "bar") + abs_path = os.path.abspath(path) + lpath = LocalPath(path) + assert icom.stringify_path(lpath) == abs_path + + def test_stringify_path_fspath(self): + p = CustomFSPath("foo/bar.csv") + result = icom.stringify_path(p) + assert result == "foo/bar.csv" + + def test_stringify_file_and_path_like(self): + # GH 38125: do not stringify file objects that are also path-like + fsspec = pytest.importorskip("fsspec") + with tm.ensure_clean() as path: + with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj: + assert fsspec_obj == icom.stringify_path(fsspec_obj) + + @pytest.mark.parametrize("path_type", path_types) + def test_infer_compression_from_path(self, compression_format, path_type): + extension, expected = compression_format + path = path_type("foo/bar.csv" + extension) + compression = icom.infer_compression(path, compression="infer") + assert compression == expected + + @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path]) + def test_get_handle_with_path(self, path_type): + # ignore LocalPath: it creates strange paths: /absolute/~/sometest + with tempfile.TemporaryDirectory(dir=Path.home()) as tmp: + filename = path_type("~/" + Path(tmp).name + "/sometest") + with icom.get_handle(filename, "w") as handles: + assert Path(handles.handle.name).is_absolute() + assert os.path.expanduser(filename) == handles.handle.name + + def test_get_handle_with_buffer(self): + with StringIO() as input_buffer: + with icom.get_handle(input_buffer, "r") as handles: + assert handles.handle == input_buffer + assert not input_buffer.closed + assert input_buffer.closed + + # Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time + def test_bytesiowrapper_returns_correct_bytes(self): + # Test latin1, ucs-2, and ucs-4 chars + data = """a,b,c +1,2,3 +©,®,® +Look,a snake,🐍""" + with icom.get_handle(StringIO(data), "rb", is_text=False) as handles: + result = b"" + chunksize = 5 + while True: + chunk = handles.handle.read(chunksize) + # Make sure each chunk is correct amount of bytes + assert len(chunk) <= chunksize + if len(chunk) < chunksize: + # Can be less amount of bytes, but only at EOF + # which happens when read returns empty + assert len(handles.handle.read()) == 0 + result += chunk + break + result += chunk + assert result == data.encode("utf-8") + + # Test that pyarrow can handle a file opened with get_handle + def test_get_handle_pyarrow_compat(self): + pa_csv = pytest.importorskip("pyarrow.csv") + + # Test latin1, ucs-2, and ucs-4 chars + data = """a,b,c +1,2,3 +©,®,® +Look,a snake,🐍""" + expected = pd.DataFrame( + {"a": ["1", "©", "Look"], "b": ["2", "®", "a snake"], "c": ["3", "®", "🐍"]} + ) + s = StringIO(data) + with icom.get_handle(s, "rb", is_text=False) as handles: + df = pa_csv.read_csv(handles.handle).to_pandas() + tm.assert_frame_equal(df, expected) + assert not s.closed + + def test_iterator(self): + with pd.read_csv(StringIO(self.data1), chunksize=1) as reader: + result = pd.concat(reader, ignore_index=True) + expected = pd.read_csv(StringIO(self.data1)) + tm.assert_frame_equal(result, expected) + + # GH12153 + with pd.read_csv(StringIO(self.data1), chunksize=1) as it: + first = next(it) + tm.assert_frame_equal(first, expected.iloc[[0]]) + 
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:]) + + @pytest.mark.parametrize( + "reader, module, error_class, fn_ext", + [ + (pd.read_csv, "os", FileNotFoundError, "csv"), + (pd.read_fwf, "os", FileNotFoundError, "txt"), + (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), + (pd.read_feather, "pyarrow", OSError, "feather"), + (pd.read_hdf, "tables", FileNotFoundError, "h5"), + (pd.read_stata, "os", FileNotFoundError, "dta"), + (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), + (pd.read_json, "os", FileNotFoundError, "json"), + (pd.read_pickle, "os", FileNotFoundError, "pickle"), + ], + ) + def test_read_non_existent(self, reader, module, error_class, fn_ext): + pytest.importorskip(module) + + path = os.path.join(HERE, "data", "does_not_exist." + fn_ext) + msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist" + msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" + msg3 = "Expected object or value" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = ( + rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: " + rf"'.+does_not_exist\.{fn_ext}'" + ) + msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'" + msg7 = ( + rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'" + ) + msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}" + + with pytest.raises( + error_class, + match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})", + ): + reader(path) + + @pytest.mark.parametrize( + "method, module, error_class, fn_ext", + [ + (pd.DataFrame.to_csv, "os", OSError, "csv"), + (pd.DataFrame.to_html, "os", OSError, "html"), + (pd.DataFrame.to_excel, "xlrd", OSError, "xlsx"), + (pd.DataFrame.to_feather, "pyarrow", OSError, "feather"), + (pd.DataFrame.to_parquet, "pyarrow", OSError, "parquet"), + (pd.DataFrame.to_stata, "os", OSError, "dta"), + (pd.DataFrame.to_json, "os", OSError, "json"), + (pd.DataFrame.to_pickle, "os", OSError, "pickle"), + ], + ) + # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables + def test_write_missing_parent_directory(self, method, module, error_class, fn_ext): + pytest.importorskip(module) + + dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]}) + + path = os.path.join(HERE, "data", "missing_folder", "does_not_exist." + fn_ext) + + with pytest.raises( + error_class, + match=r"Cannot save file into a non-existent directory: .*missing_folder", + ): + method(dummy_frame, path) + + @pytest.mark.parametrize( + "reader, module, error_class, fn_ext", + [ + (pd.read_csv, "os", FileNotFoundError, "csv"), + (pd.read_table, "os", FileNotFoundError, "csv"), + (pd.read_fwf, "os", FileNotFoundError, "txt"), + (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), + (pd.read_feather, "pyarrow", OSError, "feather"), + (pd.read_hdf, "tables", FileNotFoundError, "h5"), + (pd.read_stata, "os", FileNotFoundError, "dta"), + (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), + (pd.read_json, "os", FileNotFoundError, "json"), + (pd.read_pickle, "os", FileNotFoundError, "pickle"), + ], + ) + def test_read_expands_user_home_dir( + self, reader, module, error_class, fn_ext, monkeypatch + ): + pytest.importorskip(module) + + path = os.path.join("~", "does_not_exist." + fn_ext) + monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x)) + + msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? 
does not exist" + msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" + msg3 = "Unexpected character found when decoding 'false'" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = ( + rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: " + rf"'.+does_not_exist\.{fn_ext}'" + ) + msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'" + msg7 = ( + rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'" + ) + msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}" + + with pytest.raises( + error_class, + match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})", + ): + reader(path) + + @pytest.mark.parametrize( + "reader, module, path", + [ + (pd.read_csv, "os", ("io", "data", "csv", "iris.csv")), + (pd.read_table, "os", ("io", "data", "csv", "iris.csv")), + ( + pd.read_fwf, + "os", + ("io", "data", "fixed_width", "fixed_width_format.txt"), + ), + (pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")), + ( + pd.read_feather, + "pyarrow", + ("io", "data", "feather", "feather-0_3_1.feather"), + ), + ( + pd.read_hdf, + "tables", + ("io", "data", "legacy_hdf", "datetimetz_object.h5"), + ), + (pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")), + (pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")), + (pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")), + ( + pd.read_pickle, + "os", + ("io", "data", "pickle", "categorical.0.25.0.pickle"), + ), + ], + ) + def test_read_fspath_all(self, reader, module, path, datapath): + pytest.importorskip(module) + path = datapath(*path) + + mypath = CustomFSPath(path) + result = reader(mypath) + expected = reader(path) + + if path.endswith(".pickle"): + # categorical + tm.assert_categorical_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "writer_name, writer_kwargs, module", + [ + ("to_csv", {}, "os"), + ("to_excel", {"engine": "openpyxl"}, "openpyxl"), + ("to_feather", {}, "pyarrow"), + ("to_html", {}, "os"), + ("to_json", {}, "os"), + ("to_latex", {}, "os"), + ("to_pickle", {}, "os"), + ("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"), + ], + ) + def test_write_fspath_all(self, writer_name, writer_kwargs, module): + if writer_name in ["to_latex"]: # uses Styler implementation + pytest.importorskip("jinja2") + p1 = tm.ensure_clean("string") + p2 = tm.ensure_clean("fspath") + df = pd.DataFrame({"A": [1, 2]}) + + with p1 as string, p2 as fspath: + pytest.importorskip(module) + mypath = CustomFSPath(fspath) + writer = getattr(df, writer_name) + + writer(string, **writer_kwargs) + writer(mypath, **writer_kwargs) + with open(string, "rb") as f_str, open(fspath, "rb") as f_path: + if writer_name == "to_excel": + # binary representation of excel contains time creation + # data that causes flaky CI failures + result = pd.read_excel(f_str, **writer_kwargs) + expected = pd.read_excel(f_path, **writer_kwargs) + tm.assert_frame_equal(result, expected) + else: + result = f_str.read() + expected = f_path.read() + assert result == expected + + def test_write_fspath_hdf5(self): + # Same test as write_fspath_all, except HDF5 files aren't + # necessarily byte-for-byte identical for a given dataframe, so we'll + # have to read and compare equality + pytest.importorskip("tables") + + df = pd.DataFrame({"A": [1, 2]}) + p1 = tm.ensure_clean("string") + p2 = tm.ensure_clean("fspath") + + with p1 as string, p2 as fspath: + mypath = CustomFSPath(fspath) + 
df.to_hdf(mypath, key="bar") + df.to_hdf(string, key="bar") + + result = pd.read_hdf(fspath, key="bar") + expected = pd.read_hdf(string, key="bar") + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def mmap_file(datapath): + return datapath("io", "data", "csv", "test_mmap.csv") + + +class TestMMapWrapper: + def test_constructor_bad_file(self, mmap_file): + non_file = StringIO("I am not a file") + non_file.fileno = lambda: -1 + + # the error raised is different on Windows + if is_platform_windows(): + msg = "The parameter is incorrect" + err = OSError + else: + msg = "[Errno 22]" + err = mmap.error + + with pytest.raises(err, match=msg): + icom._maybe_memory_map(non_file, True) + + with open(mmap_file, encoding="utf-8") as target: + pass + + msg = "I/O operation on closed file" + with pytest.raises(ValueError, match=msg): + icom._maybe_memory_map(target, True) + + def test_next(self, mmap_file): + with open(mmap_file, encoding="utf-8") as target: + lines = target.readlines() + + with icom.get_handle( + target, "r", is_text=True, memory_map=True + ) as wrappers: + wrapper = wrappers.handle + assert isinstance(wrapper.buffer.buffer, mmap.mmap) + + for line in lines: + next_line = next(wrapper) + assert next_line.strip() == line.strip() + + with pytest.raises(StopIteration, match=r"^$"): + next(wrapper) + + def test_unknown_engine(self): + with tm.ensure_clean() as path: + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_csv(path) + with pytest.raises(ValueError, match="Unknown engine"): + pd.read_csv(path, engine="pyt") + + def test_binary_mode(self): + """ + 'encoding' shouldn't be passed to 'open' in binary mode. + + GH 35058 + """ + with tm.ensure_clean() as path: + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_csv(path, mode="w+b") + tm.assert_frame_equal(df, pd.read_csv(path, index_col=0)) + + @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"]) + @pytest.mark.parametrize("compression_", ["bz2", "xz"]) + def test_warning_missing_utf_bom(self, encoding, compression_): + """ + bz2 and xz do not write the byte order mark (BOM) for utf-16/32. 
+ + https://stackoverflow.com/questions/55171439 + + GH 35681 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning(UnicodeWarning): + df.to_csv(path, compression=compression_, encoding=encoding) + + # reading should fail (otherwise we wouldn't need the warning) + msg = r"UTF-\d+ stream does not start with BOM" + with pytest.raises(UnicodeError, match=msg): + pd.read_csv(path, compression=compression_, encoding=encoding) + + +def test_is_fsspec_url(): + assert icom.is_fsspec_url("gcs://pandas/somethingelse.com") + assert icom.is_fsspec_url("gs://pandas/somethingelse.com") + # the following is the only remote URL that is handled without fsspec + assert not icom.is_fsspec_url("http://pandas/somethingelse.com") + assert not icom.is_fsspec_url("random:pandas/somethingelse.com") + assert not icom.is_fsspec_url("/local/path") + assert not icom.is_fsspec_url("relative/local/path") + # fsspec URL in string should not be recognized + assert not icom.is_fsspec_url("this is not fsspec://url") + assert not icom.is_fsspec_url("{'url': 'gs://pandas/somethingelse.com'}") + # accept everything that conforms to RFC 3986 schema + assert icom.is_fsspec_url("RFC-3986+compliant.spec://something") + + +@pytest.mark.parametrize("encoding", [None, "utf-8"]) +@pytest.mark.parametrize("format", ["csv", "json"]) +def test_codecs_encoding(encoding, format): + # GH39247 + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with codecs.open(path, mode="w", encoding=encoding) as handle: + getattr(expected, f"to_{format}")(handle) + with codecs.open(path, mode="r", encoding=encoding) as handle: + if format == "csv": + df = pd.read_csv(handle, index_col=0) + else: + df = pd.read_json(handle) + tm.assert_frame_equal(expected, df) + + +def test_codecs_get_writer_reader(): + # GH39247 + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with open(path, "wb") as handle: + with codecs.getwriter("utf-8")(handle) as encoded: + expected.to_csv(encoded) + with open(path, "rb") as handle: + with codecs.getreader("utf-8")(handle) as encoded: + df = pd.read_csv(encoded, index_col=0) + tm.assert_frame_equal(expected, df) + + +@pytest.mark.parametrize( + "io_class,mode,msg", + [ + (BytesIO, "t", "a bytes-like object is required, not 'str'"), + (StringIO, "b", "string argument expected, got 'bytes'"), + ], +) +def test_explicit_encoding(io_class, mode, msg): + # GH39247; this test makes sure that if a user provides mode="*t" or "*b", + # it is used. 
In the case of this test it leads to an error as intentionally the + # wrong mode is requested + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with io_class() as buffer: + with pytest.raises(TypeError, match=msg): + expected.to_csv(buffer, mode=f"w{mode}") + + +@pytest.mark.parametrize("encoding_errors", [None, "strict", "replace"]) +@pytest.mark.parametrize("format", ["csv", "json"]) +def test_encoding_errors(encoding_errors, format): + # GH39450 + msg = "'utf-8' codec can't decode byte" + bad_encoding = b"\xe4" + + if format == "csv": + content = b"," + bad_encoding + b"\n" + bad_encoding * 2 + b"," + bad_encoding + reader = partial(pd.read_csv, index_col=0) + else: + content = ( + b'{"' + + bad_encoding * 2 + + b'": {"' + + bad_encoding + + b'":"' + + bad_encoding + + b'"}}' + ) + reader = partial(pd.read_json, orient="index") + with tm.ensure_clean() as path: + file = Path(path) + file.write_bytes(content) + + if encoding_errors != "replace": + with pytest.raises(UnicodeDecodeError, match=msg): + reader(path, encoding_errors=encoding_errors) + else: + df = reader(path, encoding_errors=encoding_errors) + decoded = bad_encoding.decode(errors=encoding_errors) + expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2]) + tm.assert_frame_equal(df, expected) + + +def test_bad_encdoing_errors(): + # GH 39777 + with tm.ensure_clean() as path: + with pytest.raises(LookupError, match="unknown error handler name"): + icom.get_handle(path, "w", errors="bad") + + +def test_errno_attribute(): + # GH 13872 + with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err: + pd.read_csv("doesnt_exist") + assert err.errno == errno.ENOENT + + +def test_fail_mmap(): + with pytest.raises(UnsupportedOperation, match="fileno"): + with BytesIO() as buffer: + icom.get_handle(buffer, "rb", memory_map=True) + + +def test_close_on_error(): + # GH 47136 + class TestError: + def close(self): + raise OSError("test") + + with pytest.raises(OSError, match="test"): + with BytesIO() as buffer: + with icom.get_handle(buffer, "rb") as handles: + handles.created_handles.append(TestError()) + + +@pytest.mark.parametrize( + "reader", + [ + pd.read_csv, + pd.read_fwf, + pd.read_excel, + pd.read_feather, + pd.read_hdf, + pd.read_stata, + pd.read_sas, + pd.read_json, + pd.read_pickle, + ], +) +def test_pickle_reader(reader): + # GH 22265 + with BytesIO() as buffer: + pickle.dump(reader, buffer) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_compression.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..3a58dda9e8dc47f2072e0175c120036523ed83f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_compression.py @@ -0,0 +1,378 @@ +import gzip +import io +import os +from pathlib import Path +import subprocess +import sys +import tarfile +import textwrap +import time +import zipfile + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +import pandas.io.common as icom + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", 
"to_csv"]) +def test_compression_size(obj, method, compression_only): + if compression_only == "tar": + compression_only = {"method": "tar", "mode": "w:gz"} + + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_csv", "to_json"]) +def test_compression_size_fh(obj, method, compression_only): + with tm.ensure_clean() as path: + with icom.get_handle( + path, + "w:gz" if compression_only == "tar" else "w", + compression=compression_only, + ) as handles: + getattr(obj, method)(handles.handle) + assert not handles.handle.closed + compressed_size = os.path.getsize(path) + with tm.ensure_clean() as path: + with icom.get_handle(path, "w", compression=None) as handles: + getattr(obj, method)(handles.handle) + assert not handles.handle.closed + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize( + "write_method, write_kwargs, read_method", + [ + ("to_csv", {"index": False}, pd.read_csv), + ("to_json", {}, pd.read_json), + ("to_pickle", {}, pd.read_pickle), + ], +) +def test_dataframe_compression_defaults_to_infer( + write_method, write_kwargs, read_method, compression_only, compression_to_extension +): + # GH22004 + input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"]) + extension = compression_to_extension[compression_only] + with tm.ensure_clean("compressed" + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only) + tm.assert_frame_equal(output, input) + + +@pytest.mark.parametrize( + "write_method,write_kwargs,read_method,read_kwargs", + [ + ("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}), + ("to_json", {}, pd.read_json, {"typ": "series"}), + ("to_pickle", {}, pd.read_pickle, {}), + ], +) +def test_series_compression_defaults_to_infer( + write_method, + write_kwargs, + read_method, + read_kwargs, + compression_only, + compression_to_extension, +): + # GH22004 + input = pd.Series([0, 5, -2, 10], name="X") + extension = compression_to_extension[compression_only] + with tm.ensure_clean("compressed" + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + if "squeeze" in read_kwargs: + kwargs = read_kwargs.copy() + del kwargs["squeeze"] + output = read_method(path, compression=compression_only, **kwargs).squeeze( + "columns" + ) + else: + output = read_method(path, compression=compression_only, **read_kwargs) + tm.assert_series_equal(output, input, check_names=False) + + +def test_compression_warning(compression_only): + # Assert that passing a file object to to_csv while explicitly specifying a + # compression protocol triggers a RuntimeWarning, as per GH21227. 
+ df = pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ) + with tm.ensure_clean() as path: + with icom.get_handle(path, "w", compression=compression_only) as handles: + with tm.assert_produces_warning(RuntimeWarning): + df.to_csv(handles.handle, compression=compression_only) + + +def test_compression_binary(compression_only): + """ + Binary file handles support compression. + + GH22555 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # with a file + with tm.ensure_clean() as path: + with open(path, mode="wb") as file: + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(path, index_col=0, compression=compression_only) + ) + + # with BytesIO + file = io.BytesIO() + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(file, index_col=0, compression=compression_only) + ) + + +def test_gzip_reproducibility_file_name(): + """ + Gzip should create reproducible archives with mtime. + + Note: Archives created with different filenames will still be different! + + GH 28103 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + compression_options = {"method": "gzip", "mtime": 1} + + # test for filename + with tm.ensure_clean() as path: + path = Path(path) + df.to_csv(path, compression=compression_options) + time.sleep(0.1) + output = path.read_bytes() + df.to_csv(path, compression=compression_options) + assert output == path.read_bytes() + + +def test_gzip_reproducibility_file_object(): + """ + Gzip should create reproducible archives with mtime. + + GH 28103 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + compression_options = {"method": "gzip", "mtime": 1} + + # test for file object + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + output = buffer.getvalue() + time.sleep(0.1) + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + assert output == buffer.getvalue() + + +@pytest.mark.single_cpu +def test_with_missing_lzma(): + """Tests if import pandas works when lzma is not present.""" + # https://github.com/pandas-dev/pandas/issues/27575 + code = textwrap.dedent( + """\ + import sys + sys.modules['lzma'] = None + import pandas + """ + ) + subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE) + + +@pytest.mark.single_cpu +def test_with_missing_lzma_runtime(): + """Tests if RuntimeError is hit when calling lzma without + having the module available. 
+ """ + code = textwrap.dedent( + """ + import sys + import pytest + sys.modules['lzma'] = None + import pandas as pd + df = pd.DataFrame() + with pytest.raises(RuntimeError, match='lzma module'): + df.to_csv('foo.csv', compression='xz') + """ + ) + subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE) + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_gzip_compression_level(obj, method): + # GH33196 + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression="gzip") + compressed_size_default = os.path.getsize(path) + getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1}) + compressed_size_fast = os.path.getsize(path) + assert compressed_size_default < compressed_size_fast + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_xz_compression_level_read(obj, method): + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression="xz") + compressed_size_default = os.path.getsize(path) + getattr(obj, method)(path, compression={"method": "xz", "preset": 1}) + compressed_size_fast = os.path.getsize(path) + assert compressed_size_default < compressed_size_fast + if method == "to_csv": + pd.read_csv(path, compression="xz") + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_bzip_compression_level(obj, method): + """GH33196 bzip needs file size > 100k to show a size difference between + compression levels, so here we just check if the call works when + compression is passed as a dict. 
+ """ + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1}) + + +@pytest.mark.parametrize( + "suffix,archive", + [ + (".zip", zipfile.ZipFile), + (".tar", tarfile.TarFile), + ], +) +def test_empty_archive_zip(suffix, archive): + with tm.ensure_clean(filename=suffix) as path: + with archive(path, "w"): + pass + with pytest.raises(ValueError, match="Zero files found"): + pd.read_csv(path) + + +def test_ambiguous_archive_zip(): + with tm.ensure_clean(filename=".zip") as path: + with zipfile.ZipFile(path, "w") as file: + file.writestr("a.csv", "foo,bar") + file.writestr("b.csv", "foo,bar") + with pytest.raises(ValueError, match="Multiple files found in ZIP file"): + pd.read_csv(path) + + +def test_ambiguous_archive_tar(tmp_path): + csvAPath = tmp_path / "a.csv" + with open(csvAPath, "w", encoding="utf-8") as a: + a.write("foo,bar\n") + csvBPath = tmp_path / "b.csv" + with open(csvBPath, "w", encoding="utf-8") as b: + b.write("foo,bar\n") + + tarpath = tmp_path / "archive.tar" + with tarfile.TarFile(tarpath, "w") as tar: + tar.add(csvAPath, "a.csv") + tar.add(csvBPath, "b.csv") + + with pytest.raises(ValueError, match="Multiple files found in TAR archive"): + pd.read_csv(tarpath) + + +def test_tar_gz_to_different_filename(): + with tm.ensure_clean(filename=".foo") as file: + pd.DataFrame( + [["1", "2"]], + columns=["foo", "bar"], + ).to_csv(file, compression={"method": "tar", "mode": "w:gz"}, index=False) + with gzip.open(file) as uncompressed: + with tarfile.TarFile(fileobj=uncompressed) as archive: + members = archive.getmembers() + assert len(members) == 1 + content = archive.extractfile(members[0]).read().decode("utf8") + + if is_platform_windows(): + expected = "foo,bar\r\n1,2\r\n" + else: + expected = "foo,bar\n1,2\n" + + assert content == expected + + +def test_tar_no_error_on_close(): + with io.BytesIO() as buffer: + with icom._BytesTarFile(fileobj=buffer, mode="w"): + pass diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_feather.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_feather.py new file mode 100644 index 0000000000000000000000000000000000000000..22a7d3b83a459a5dc48ee5d56c2f70130d644be4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_feather.py @@ -0,0 +1,252 @@ +""" test feather-format compat """ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.feather_format import read_feather, to_feather # isort:skip + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +pa = pytest.importorskip("pyarrow") + + +@pytest.mark.single_cpu +class TestFeather: + def check_error_on_write(self, df, exc, err_msg): + # check that we are raising the exception + # on writing + + with pytest.raises(exc, match=err_msg): + with tm.ensure_clean() as path: + to_feather(df, path) + + def check_external_error_on_write(self, df): + # check that we are raising the exception + # on writing + + with tm.external_error_raised(Exception): + with tm.ensure_clean() as path: + to_feather(df, path) + + def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs): + if expected is None: + expected = df.copy() + + with tm.ensure_clean() as path: + to_feather(df, path, **write_kwargs) + + result = read_feather(path, **read_kwargs) + + tm.assert_frame_equal(result, expected) + + def test_error(self): + 
msg = "feather only support IO with DataFrames" + for obj in [ + pd.Series([1, 2, 3]), + 1, + "foo", + pd.Timestamp("20130101"), + np.array([1, 2, 3]), + ]: + self.check_error_on_write(obj, ValueError, msg) + + def test_basic(self): + df = pd.DataFrame( + { + "string": list("abc"), + "int": list(range(1, 4)), + "uint": np.arange(3, 6).astype("u1"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_null": [1.0, np.nan, 3], + "bool": [True, False, True], + "bool_with_null": [True, np.nan, False], + "cat": pd.Categorical(list("abc")), + "dt": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3)), freq=None + ), + "dttz": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3, tz="US/Eastern")), + freq=None, + ), + "dt_with_null": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + "dtns": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3, freq="ns")), freq=None + ), + } + ) + df["periods"] = pd.period_range("2013", freq="M", periods=3) + df["timedeltas"] = pd.timedelta_range("1 day", periods=3) + df["intervals"] = pd.interval_range(0, 3, 3) + + assert df.dttz.dtype.tz.zone == "US/Eastern" + + expected = df.copy() + expected.loc[1, "bool_with_null"] = None + self.check_round_trip(df, expected=expected) + + def test_duplicate_columns(self): + # https://github.com/wesm/feather/issues/53 + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + self.check_external_error_on_write(df) + + def test_read_columns(self): + # GH 24025 + df = pd.DataFrame( + { + "col1": list("abc"), + "col2": list(range(1, 4)), + "col3": list("xyz"), + "col4": list(range(4, 7)), + } + ) + columns = ["col1", "col3"] + self.check_round_trip(df, expected=df[columns], columns=columns) + + def test_read_columns_different_order(self): + # GH 33878 + df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]}) + expected = df[["B", "A"]] + self.check_round_trip(df, expected, columns=["B", "A"]) + + def test_unsupported_other(self): + # mixed python objects + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + self.check_external_error_on_write(df) + + def test_rw_use_threads(self): + df = pd.DataFrame({"A": np.arange(100000)}) + self.check_round_trip(df, use_threads=True) + self.check_round_trip(df, use_threads=False) + + def test_path_pathlib(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + result = tm.round_trip_pathlib(df.to_feather, read_feather) + tm.assert_frame_equal(df, result) + + def test_path_localpath(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + result = tm.round_trip_localpath(df.to_feather, read_feather) + tm.assert_frame_equal(df, result) + + def test_passthrough_keywords(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + self.check_round_trip(df, write_kwargs={"version": 1}) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_http_path(self, feather_file, httpserver): + # GH 29055 + expected = read_feather(feather_file) + with open(feather_file, "rb") as f: + httpserver.serve_content(content=f.read()) + res = 
read_feather(httpserver.url) + tm.assert_frame_equal(expected, res) + + def test_read_feather_dtype_backend(self, string_storage, dtype_backend): + # GH#50765 + df = pd.DataFrame( + { + "a": pd.Series([1, np.nan, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="Int64"), + "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + + else: + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + with tm.ensure_clean() as path: + to_feather(df, path) + with pd.option_context("mode.string_storage", string_storage): + result = read_feather(path, dtype_backend=dtype_backend) + + expected = pd.DataFrame( + { + "a": pd.Series([1, np.nan, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="Int64"), + "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": pd.Series([True, False, pd.NA], dtype="boolean"), + "f": pd.Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = pd.DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(result, expected) + + def test_int_columns_and_index(self): + df = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index([3, 4, 5], name="test")) + self.check_round_trip(df) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.feather") as path: + df.to_feather(path) + with pytest.raises(ValueError, match=msg): + read_feather(path, dtype_backend="numpy") + + def test_string_inference(self, tmp_path): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}) + df.to_feather(path) + with pd.option_context("future.infer_string", True): + result = read_feather(path) + expected = pd.DataFrame(data={"a": ["x", "y"]}, dtype="string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py new file mode 100644 index 0000000000000000000000000000000000000000..a1dec8a2d05b4fc4c39cbc910544532bf4eb0cca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py @@ -0,0 +1,345 @@ +import io + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + date_range, + read_csv, + read_excel, + read_feather, + read_json, + read_parquet, + read_pickle, + read_stata, + read_table, +) +import pandas._testing as tm +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def fsspectest(): + pytest.importorskip("fsspec") + from fsspec import register_implementation + from fsspec.implementations.memory import MemoryFileSystem + from fsspec.registry import _registry as registry + + class TestMemoryFS(MemoryFileSystem): + protocol = "testmem" + test = [None] + + def __init__(self, **kwargs) -> None: + self.test[0] = kwargs.pop("test", None) + super().__init__(**kwargs) + + register_implementation("testmem", TestMemoryFS, clobber=True) + yield TestMemoryFS() + registry.pop("testmem", None) + TestMemoryFS.test[0] = None + TestMemoryFS.store.clear() + + +@pytest.fixture +def df1(): + return DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + +@pytest.fixture +def cleared_fs(): + fsspec = pytest.importorskip("fsspec") + + memfs = fsspec.filesystem("memory") + yield memfs + memfs.store.clear() + + +def test_read_csv(cleared_fs, df1): + text = str(df1.to_csv(index=False)).encode() + with cleared_fs.open("test/test.csv", "wb") as w: + w.write(text) + df2 = read_csv("memory://test/test.csv", parse_dates=["dt"]) + + tm.assert_frame_equal(df1, df2) + + +def test_reasonable_error(monkeypatch, cleared_fs): + from fsspec.registry import known_implementations + + with pytest.raises(ValueError, match="nosuchprotocol"): + read_csv("nosuchprotocol://test/test.csv") + err_msg = "test error message" + monkeypatch.setitem( + known_implementations, + "couldexist", + {"class": "unimportable.CouldExist", "err": err_msg}, + ) + with pytest.raises(ImportError, match=err_msg): + read_csv("couldexist://test/test.csv") + + +def test_to_csv(cleared_fs, df1): + df1.to_csv("memory://test/test.csv", index=True) + + df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0) + + tm.assert_frame_equal(df1, df2) + + +def test_to_excel(cleared_fs, df1): + pytest.importorskip("openpyxl") + ext = "xlsx" + path = f"memory://test/test.{ext}" + df1.to_excel(path, index=True) + + df2 = read_excel(path, parse_dates=["dt"], index_col=0) + + tm.assert_frame_equal(df1, df2) + + +@pytest.mark.parametrize("binary_mode", [False, True]) +def 
test_to_csv_fsspec_object(cleared_fs, binary_mode, df1): + fsspec = pytest.importorskip("fsspec") + + path = "memory://test/test.csv" + mode = "wb" if binary_mode else "w" + with fsspec.open(path, mode=mode).open() as fsspec_object: + df1.to_csv(fsspec_object, index=True) + assert not fsspec_object.closed + + mode = mode.replace("w", "r") + with fsspec.open(path, mode=mode) as fsspec_object: + df2 = read_csv( + fsspec_object, + parse_dates=["dt"], + index_col=0, + ) + assert not fsspec_object.closed + + tm.assert_frame_equal(df1, df2) + + +def test_csv_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_csv( + "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False + ) + assert fsspectest.test[0] == "csv_write" + read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"}) + assert fsspectest.test[0] == "csv_read" + + +def test_read_table_options(fsspectest): + # GH #39167 + df = DataFrame({"a": [0]}) + df.to_csv( + "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False + ) + assert fsspectest.test[0] == "csv_write" + read_table("testmem://test/test.csv", storage_options={"test": "csv_read"}) + assert fsspectest.test[0] == "csv_read" + + +def test_excel_options(fsspectest): + pytest.importorskip("openpyxl") + extension = "xlsx" + + df = DataFrame({"a": [0]}) + + path = f"testmem://test/test.{extension}" + + df.to_excel(path, storage_options={"test": "write"}, index=False) + assert fsspectest.test[0] == "write" + read_excel(path, storage_options={"test": "read"}) + assert fsspectest.test[0] == "read" + + +def test_to_parquet_new_file(cleared_fs, df1): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + + df1.to_parquet( + "memory://test/test.csv", index=True, engine="fastparquet", compression=None + ) + + +def test_arrowparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("pyarrow") + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="pyarrow", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="pyarrow", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet +def test_fastparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="fastparquet", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="fastparquet", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + +@pytest.mark.single_cpu +def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so): + pytest.importorskip("s3fs") + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so + ), + read_csv(tips_file), + ) + # the following are decompressed by pandas, not fsspec + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv.gz", storage_options=s3so + ), + read_csv(tips_file), + ) + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv.bz2", 
storage_options=s3so + ), + read_csv(tips_file), + ) + + +@pytest.mark.single_cpu +@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) +def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so): + pytest.importorskip("s3fs") + tm.assert_equal( + read_csv( + f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv", + storage_options=s3so, + ), + read_csv(tips_file), + ) + + +@pytest.mark.single_cpu +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet +def test_s3_parquet(s3_public_bucket, s3so, df1): + pytest.importorskip("fastparquet") + pytest.importorskip("s3fs") + + fn = f"s3://{s3_public_bucket.name}/test.parquet" + df1.to_parquet( + fn, index=False, engine="fastparquet", compression=None, storage_options=s3so + ) + df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) + tm.assert_equal(df1, df2) + + +@td.skip_if_installed("fsspec") +def test_not_present_exception(): + msg = "Missing optional dependency 'fsspec'|fsspec library is required" + with pytest.raises(ImportError, match=msg): + read_csv("memory://test/test.csv") + + +def test_feather_options(fsspectest): + pytest.importorskip("pyarrow") + df = DataFrame({"a": [0]}) + df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"}) + assert fsspectest.test[0] == "feather_write" + out = read_feather("testmem://mockfile", storage_options={"test": "feather_read"}) + assert fsspectest.test[0] == "feather_read" + tm.assert_frame_equal(df, out) + + +def test_pickle_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_pickle("testmem://mockfile", storage_options={"test": "pickle_write"}) + assert fsspectest.test[0] == "pickle_write" + out = read_pickle("testmem://mockfile", storage_options={"test": "pickle_read"}) + assert fsspectest.test[0] == "pickle_read" + tm.assert_frame_equal(df, out) + + +def test_json_options(fsspectest, compression): + df = DataFrame({"a": [0]}) + df.to_json( + "testmem://mockfile", + compression=compression, + storage_options={"test": "json_write"}, + ) + assert fsspectest.test[0] == "json_write" + out = read_json( + "testmem://mockfile", + compression=compression, + storage_options={"test": "json_read"}, + ) + assert fsspectest.test[0] == "json_read" + tm.assert_frame_equal(df, out) + + +def test_stata_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_stata( + "testmem://mockfile", storage_options={"test": "stata_write"}, write_index=False + ) + assert fsspectest.test[0] == "stata_write" + out = read_stata("testmem://mockfile", storage_options={"test": "stata_read"}) + assert fsspectest.test[0] == "stata_read" + tm.assert_frame_equal(df, out.astype("int64")) + + +def test_markdown_options(fsspectest): + pytest.importorskip("tabulate") + df = DataFrame({"a": [0]}) + df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"}) + assert fsspectest.test[0] == "md_write" + assert fsspectest.cat("testmem://mockfile") + + +def test_non_fsspec_options(): + pytest.importorskip("pyarrow") + with pytest.raises(ValueError, match="storage_options"): + read_csv("localfile", storage_options={"a": True}) + with pytest.raises(ValueError, match="storage_options"): + # separate test for parquet, which has a different code path + read_parquet("localfile", storage_options={"a": True}) + by = io.BytesIO() + + with pytest.raises(ValueError, match="storage_options"): + read_csv(by, storage_options={"a": True}) + + df = DataFrame({"a": [0]}) + with pytest.raises(ValueError, match="storage_options"): + 
df.to_parquet("nonfsspecpath", storage_options={"a": True}) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b212ceb2c41c9a8fa0828b691b6161db02d62f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py @@ -0,0 +1,14 @@ +import pandas as pd +import pandas._testing as tm + + +def test_read_gbq_deprecated(): + with tm.assert_produces_warning(FutureWarning): + with tm.external_error_raised(Exception): + pd.read_gbq("fake") + + +def test_to_gbq_deprecated(): + with tm.assert_produces_warning(FutureWarning): + with tm.external_error_raised(Exception): + pd.DataFrame(range(1)).to_gbq("fake") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py new file mode 100644 index 0000000000000000000000000000000000000000..0ce6a8bf82cd835d64ec7bab242ec6444973b64e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py @@ -0,0 +1,219 @@ +from io import BytesIO +import os +import pathlib +import tarfile +import zipfile + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + date_range, + read_csv, + read_excel, + read_json, + read_parquet, +) +import pandas._testing as tm +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def gcs_buffer(): + """Emulate GCS using a binary buffer.""" + pytest.importorskip("gcsfs") + fsspec = pytest.importorskip("fsspec") + + gcs_buffer = BytesIO() + gcs_buffer.close = lambda: True + + class MockGCSFileSystem(fsspec.AbstractFileSystem): + @staticmethod + def open(*args, **kwargs): + gcs_buffer.seek(0) + return gcs_buffer + + def ls(self, path, **kwargs): + # needed for pyarrow + return [{"name": path, "type": "file"}] + + # Overwrites the default implementation from gcsfs to our mock class + fsspec.register_implementation("gs", MockGCSFileSystem, clobber=True) + + return gcs_buffer + + +# Patches pyarrow; other processes should not pick up change +@pytest.mark.single_cpu +@pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"]) +def test_to_read_gcs(gcs_buffer, format, monkeypatch, capsys): + """ + Test that many to/read functions support GCS. 
+ + GH 33987 + """ + + df1 = DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + path = f"gs://test/test.{format}" + + if format == "csv": + df1.to_csv(path, index=True) + df2 = read_csv(path, parse_dates=["dt"], index_col=0) + elif format == "excel": + path = "gs://test/test.xlsx" + df1.to_excel(path) + df2 = read_excel(path, parse_dates=["dt"], index_col=0) + elif format == "json": + df1.to_json(path) + df2 = read_json(path, convert_dates=["dt"]) + elif format == "parquet": + pytest.importorskip("pyarrow") + pa_fs = pytest.importorskip("pyarrow.fs") + + class MockFileSystem(pa_fs.FileSystem): + @staticmethod + def from_uri(path): + print("Using pyarrow filesystem") + to_local = pathlib.Path(path.replace("gs://", "")).absolute().as_uri() + return pa_fs.LocalFileSystem(to_local) + + with monkeypatch.context() as m: + m.setattr(pa_fs, "FileSystem", MockFileSystem) + df1.to_parquet(path) + df2 = read_parquet(path) + captured = capsys.readouterr() + assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n" + elif format == "markdown": + pytest.importorskip("tabulate") + df1.to_markdown(path) + df2 = df1 + + tm.assert_frame_equal(df1, df2) + + +def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str): + """ + For zip compression, only compare the CRC-32 checksum of the file contents + to avoid checking the time-dependent last-modified timestamp which + in some CI builds is off-by-one + + See https://en.wikipedia.org/wiki/ZIP_(file_format)#File_headers + """ + if compression == "zip": + # Only compare the CRC checksum of the file contents + with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile( + BytesIO(expected) + ) as res: + for res_info, exp_info in zip(res.infolist(), exp.infolist()): + assert res_info.CRC == exp_info.CRC + elif compression == "tar": + with tarfile.open(fileobj=BytesIO(result)) as tar_exp, tarfile.open( + fileobj=BytesIO(expected) + ) as tar_res: + for tar_res_info, tar_exp_info in zip( + tar_res.getmembers(), tar_exp.getmembers() + ): + actual_file = tar_res.extractfile(tar_res_info) + expected_file = tar_exp.extractfile(tar_exp_info) + assert (actual_file is None) == (expected_file is None) + if actual_file is not None and expected_file is not None: + assert actual_file.read() == expected_file.read() + else: + assert result == expected + + +@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"]) +def test_to_csv_compression_encoding_gcs( + gcs_buffer, compression_only, encoding, compression_to_extension +): + """ + Compression and encoding should with GCS. 
+ + GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and + GH 32392 (read_csv, encoding) + """ + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # reference of compressed and encoded file + compression = {"method": compression_only} + if compression_only == "gzip": + compression["mtime"] = 1 # be reproducible + buffer = BytesIO() + df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb") + + # write compressed file with explicit compression + path_gcs = "gs://test/test.csv" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + res = gcs_buffer.getvalue() + expected = buffer.getvalue() + assert_equal_zip_safe(res, expected, compression_only) + + read_df = read_csv( + path_gcs, index_col=0, compression=compression_only, encoding=encoding + ) + tm.assert_frame_equal(df, read_df) + + # write compressed file with implicit compression + file_ext = compression_to_extension[compression_only] + compression["method"] = "infer" + path_gcs += f".{file_ext}" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + + res = gcs_buffer.getvalue() + expected = buffer.getvalue() + assert_equal_zip_safe(res, expected, compression_only) + + read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding) + tm.assert_frame_equal(df, read_df) + + +def test_to_parquet_gcs_new_file(monkeypatch, tmpdir): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + pytest.importorskip("gcsfs") + + from fsspec import AbstractFileSystem + + df1 = DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + class MockGCSFileSystem(AbstractFileSystem): + def open(self, path, mode="r", *args): + if "w" not in mode: + raise FileNotFoundError + return open(os.path.join(tmpdir, "test.parquet"), mode, encoding="utf-8") + + monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) + df1.to_parquet( + "gs://test/test.csv", index=True, engine="fastparquet", compression=None + ) + + +@td.skip_if_installed("gcsfs") +def test_gcs_not_present_exception(): + with tm.external_error_raised(ImportError): + read_csv("gs://test/test.csv") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_html.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_html.py new file mode 100644 index 0000000000000000000000000000000000000000..607357e709b6ec94225c8ff266219abdb763e085 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_html.py @@ -0,0 +1,1657 @@ +from collections.abc import Iterator +from functools import partial +from io import ( + BytesIO, + StringIO, +) +import os +from pathlib import Path +import re +import threading +from urllib.error import URLError + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, + read_csv, + read_html, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.common import file_path_to_url + + +@pytest.fixture( + params=[ + "chinese_utf-16.html", + "chinese_utf-32.html", + "chinese_utf-8.html", + "letz_latin1.html", + ] +) +def html_encoding_file(request, datapath): + """Parametrized 
fixture for HTML encoding test filenames.""" + return datapath("io", "data", "html_encoding", request.param) + + +def assert_framelist_equal(list1, list2, *args, **kwargs): + assert len(list1) == len(list2), ( + "lists are not of equal size " + f"len(list1) == {len(list1)}, " + f"len(list2) == {len(list2)}" + ) + msg = "not all list elements are DataFrames" + both_frames = all( + map( + lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame), + list1, + list2, + ) + ) + assert both_frames, msg + for frame_i, frame_j in zip(list1, list2): + tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs) + assert not frame_i.empty, "frames are both empty" + + +def test_bs4_version_fails(monkeypatch, datapath): + bs4 = pytest.importorskip("bs4") + pytest.importorskip("html5lib") + + monkeypatch.setattr(bs4, "__version__", "4.2") + with pytest.raises(ImportError, match="Pandas requires version"): + read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4") + + +def test_invalid_flavor(): + url = "google.com" + flavor = "invalid flavor" + msg = r"\{" + flavor + r"\} is not a valid set of flavors" + + with pytest.raises(ValueError, match=msg): + read_html(StringIO(url), match="google", flavor=flavor) + + +def test_same_ordering(datapath): + pytest.importorskip("bs4") + pytest.importorskip("lxml") + pytest.importorskip("html5lib") + + filename = datapath("io", "data", "html", "valid_markup.html") + dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"]) + dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"]) + assert_framelist_equal(dfs_lxml, dfs_bs4) + + +@pytest.fixture( + params=[ + pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]), + pytest.param("lxml", marks=td.skip_if_no("lxml")), + ], +) +def flavor_read_html(request): + return partial(read_html, flavor=request.param) + + +class TestReadHtml: + def test_literal_html_deprecation(self, flavor_read_html): + # GH 53785 + msg = ( + "Passing literal html to 'read_html' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + flavor_read_html( + """ + + + + + + + + + + + + + + + + + + +
AB
12
34
""" + ) + + @pytest.fixture + def spam_data(self, datapath): + return datapath("io", "data", "html", "spam.html") + + @pytest.fixture + def banklist_data(self, datapath): + return datapath("io", "data", "html", "banklist.html") + + def test_to_html_compat(self, flavor_read_html): + df = ( + DataFrame( + np.random.default_rng(2).random((4, 3)), + columns=pd.Index(list("abc"), dtype=object), + ) + # pylint: disable-next=consider-using-f-string + .map("{:.3f}".format).astype(float) + ) + out = df.to_html() + res = flavor_read_html( + StringIO(out), attrs={"class": "dataframe"}, index_col=0 + )[0] + tm.assert_frame_equal(res, df) + + def test_dtype_backend(self, string_storage, dtype_backend, flavor_read_html): + # GH#50286 + df = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_)) + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + out = df.to_html(index=False) + with pd.option_context("mode.string_storage", string_storage): + result = flavor_read_html(StringIO(out), dtype_backend=dtype_backend)[0] + + expected = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + import pyarrow as pa + + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_banklist_url(self, httpserver, banklist_data, flavor_read_html): + with open(banklist_data, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df1 = flavor_read_html( + # lxml cannot find attrs leave out for now + httpserver.url, + match="First Federal Bank of Florida", # attrs={"class": "dataTable"} + ) + # lxml cannot find attrs leave out for now + df2 = flavor_read_html( + httpserver.url, + match="Metcalf Bank", + ) # attrs={"class": "dataTable"}) + + assert_framelist_equal(df1, df2) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_spam_url(self, httpserver, spam_data, flavor_read_html): + with open(spam_data, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df1 = flavor_read_html(httpserver.url, match=".*Water.*") + df2 = flavor_read_html(httpserver.url, match="Unit") + + assert_framelist_equal(df1, df2) + + @pytest.mark.slow + def test_banklist(self, banklist_data, flavor_read_html): + df1 = flavor_read_html( + 
banklist_data, match=".*Florida.*", attrs={"id": "table"} + ) + df2 = flavor_read_html( + banklist_data, match="Metcalf Bank", attrs={"id": "table"} + ) + + assert_framelist_equal(df1, df2) + + def test_spam(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*") + df2 = flavor_read_html(spam_data, match="Unit") + assert_framelist_equal(df1, df2) + + assert df1[0].iloc[0, 0] == "Proximates" + assert df1[0].columns[0] == "Nutrient" + + def test_spam_no_match(self, spam_data, flavor_read_html): + dfs = flavor_read_html(spam_data) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_banklist_no_match(self, banklist_data, flavor_read_html): + dfs = flavor_read_html(banklist_data, attrs={"id": "table"}) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_spam_header(self, spam_data, flavor_read_html): + df = flavor_read_html(spam_data, match=".*Water.*", header=2)[0] + assert df.columns[0] == "Proximates" + assert not df.empty + + def test_skiprows_int(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_range(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=range(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=range(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_list(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=[1, 2]) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=[2, 1]) + + assert_framelist_equal(df1, df2) + + def test_skiprows_set(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows={1, 2}) + df2 = flavor_read_html(spam_data, match="Unit", skiprows={2, 1}) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_short(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_long(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_ndarray(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=np.arange(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=np.arange(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_invalid(self, spam_data, flavor_read_html): + with pytest.raises(TypeError, match=("is not a valid type for skipping rows")): + flavor_read_html(spam_data, match=".*Water.*", skiprows="asdf") + + def test_index(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) + assert_framelist_equal(df1, df2) + + def test_header_and_index_no_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = 
flavor_read_html(spam_data, match="Unit", header=1, index_col=0) + assert_framelist_equal(df1, df2) + + def test_header_and_index_with_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0) + assert_framelist_equal(df1, df2) + + def test_infer_types(self, spam_data, flavor_read_html): + # 10892 infer_types removed + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) + assert_framelist_equal(df1, df2) + + def test_string_io(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + data1 = StringIO(f.read()) + + with open(spam_data, encoding="UTF-8") as f: + data2 = StringIO(f.read()) + + df1 = flavor_read_html(data1, match=".*Water.*") + df2 = flavor_read_html(data2, match="Unit") + assert_framelist_equal(df1, df2) + + def test_string(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + data = f.read() + + df1 = flavor_read_html(StringIO(data), match=".*Water.*") + df2 = flavor_read_html(StringIO(data), match="Unit") + + assert_framelist_equal(df1, df2) + + def test_file_like(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + df1 = flavor_read_html(f, match=".*Water.*") + + with open(spam_data, encoding="UTF-8") as f: + df2 = flavor_read_html(f, match="Unit") + + assert_framelist_equal(df1, df2) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_bad_url_protocol(self, httpserver, flavor_read_html): + httpserver.serve_content("urlopen error unknown url type: git", code=404) + with pytest.raises(URLError, match="urlopen error unknown url type: git"): + flavor_read_html("git://github.com", match=".*Water.*") + + @pytest.mark.slow + @pytest.mark.network + @pytest.mark.single_cpu + def test_invalid_url(self, httpserver, flavor_read_html): + httpserver.serve_content("Name or service not known", code=404) + with pytest.raises((URLError, ValueError), match="HTTP Error 404: NOT FOUND"): + flavor_read_html(httpserver.url, match=".*Water.*") + + @pytest.mark.slow + def test_file_url(self, banklist_data, flavor_read_html): + url = banklist_data + dfs = flavor_read_html( + file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"} + ) + assert isinstance(dfs, list) + for df in dfs: + assert isinstance(df, DataFrame) + + @pytest.mark.slow + def test_invalid_table_attrs(self, banklist_data, flavor_read_html): + url = banklist_data + with pytest.raises(ValueError, match="No tables found"): + flavor_read_html( + url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"} + ) + + @pytest.mark.slow + def test_multiindex_header(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, header=[0, 1] + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, index_col=[0, 1] + )[0] + assert isinstance(df.index, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + )[0] + assert isinstance(df.columns, MultiIndex) + assert isinstance(df.index, MultiIndex) + + 
@pytest.mark.slow + def test_multiindex_header_skiprows_tuples(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_index_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.index, MultiIndex) + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_regex_idempotency(self, banklist_data, flavor_read_html): + url = banklist_data + dfs = flavor_read_html( + file_path_to_url(os.path.abspath(url)), + match=re.compile(re.compile("Florida")), + attrs={"id": "table"}, + ) + assert isinstance(dfs, list) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_negative_skiprows(self, spam_data, flavor_read_html): + msg = r"\(you passed a negative value\)" + with pytest.raises(ValueError, match=msg): + flavor_read_html(spam_data, match="Water", skiprows=-1) + + @pytest.fixture + def python_docs(self): + return """ + + +
+ + + + + + + + + + + + +
+ +

Indices and tables:

+ + +
+ + + + + + +
+ """ # noqa: E501 + + @pytest.mark.network + @pytest.mark.single_cpu + def test_multiple_matches(self, python_docs, httpserver, flavor_read_html): + httpserver.serve_content(content=python_docs) + dfs = flavor_read_html(httpserver.url, match="Python") + assert len(dfs) > 1 + + @pytest.mark.network + @pytest.mark.single_cpu + def test_python_docs_table(self, python_docs, httpserver, flavor_read_html): + httpserver.serve_content(content=python_docs) + dfs = flavor_read_html(httpserver.url, match="Python") + zz = [df.iloc[0, 0][0:4] for df in dfs] + assert sorted(zz) == ["Pyth", "What"] + + def test_empty_tables(self, flavor_read_html): + """ + Make sure that read_html ignores empty tables. + """ + html = """ + + + + + + + + + + + + + +
AB
12
+ + + +
+ """ + result = flavor_read_html(StringIO(html)) + assert len(result) == 1 + + def test_multiple_tbody(self, flavor_read_html): + # GH-20690 + # Read all tbody tags within a single table. + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + +
AB
12
34
""" + ) + )[0] + + expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_header_and_one_column(self, flavor_read_html): + """ + Don't fail with bs4 when there is a header and only one column + as described in issue #9178 + """ + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + +
Header
first
""" + ) + )[0] + + expected = DataFrame(data={"Header": "first"}, index=[0]) + + tm.assert_frame_equal(result, expected) + + def test_thead_without_tr(self, flavor_read_html): + """ + Ensure parser adds within on malformed HTML. + """ + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + +
CountryMunicipalityYear
UkraineOdessa1944
""" + ) + )[0] + + expected = DataFrame( + data=[["Ukraine", "Odessa", 1944]], + columns=["Country", "Municipality", "Year"], + ) + + tm.assert_frame_equal(result, expected) + + def test_tfoot_read(self, flavor_read_html): + """ + Make sure that read_html reads tfoot, containing td or th. + Ignores empty tfoot + """ + data_template = """ + + + + + + + + + + + + + + {footer} + +
AB
bodyAbodyB
""" + + expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"]) + + expected2 = DataFrame( + data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"] + ) + + data1 = data_template.format(footer="") + data2 = data_template.format(footer="footAfootB") + + result1 = flavor_read_html(StringIO(data1))[0] + result2 = flavor_read_html(StringIO(data2))[0] + + tm.assert_frame_equal(result1, expected1) + tm.assert_frame_equal(result2, expected2) + + def test_parse_header_of_non_string_column(self, flavor_read_html): + # GH5048: if header is specified explicitly, an int column should be + # parsed as int while its header is parsed as str + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
SI
text1944
+ """ + ), + header=0, + )[0] + + expected = DataFrame([["text", 1944]], columns=("S", "I")) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.slow + def test_banklist_header(self, banklist_data, datapath, flavor_read_html): + from pandas.io.html import _remove_whitespace + + def try_remove_ws(x): + try: + return _remove_whitespace(x) + except AttributeError: + return x + + df = flavor_read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0] + ground_truth = read_csv( + datapath("io", "data", "csv", "banklist.csv"), + converters={"Updated Date": Timestamp, "Closing Date": Timestamp}, + ) + assert df.shape == ground_truth.shape + old = [ + "First Vietnamese American Bank In Vietnamese", + "Westernbank Puerto Rico En Espanol", + "R-G Premier Bank of Puerto Rico En Espanol", + "Eurobank En Espanol", + "Sanderson State Bank En Espanol", + "Washington Mutual Bank (Including its subsidiary Washington " + "Mutual Bank FSB)", + "Silver State Bank En Espanol", + "AmTrade International Bank En Espanol", + "Hamilton Bank, NA En Espanol", + "The Citizens Savings Bank Pioneer Community Bank, Inc.", + ] + new = [ + "First Vietnamese American Bank", + "Westernbank Puerto Rico", + "R-G Premier Bank of Puerto Rico", + "Eurobank", + "Sanderson State Bank", + "Washington Mutual Bank", + "Silver State Bank", + "AmTrade International Bank", + "Hamilton Bank, NA", + "The Citizens Savings Bank", + ] + dfnew = df.map(try_remove_ws).replace(old, new) + gtnew = ground_truth.map(try_remove_ws) + converted = dfnew + date_cols = ["Closing Date", "Updated Date"] + converted[date_cols] = converted[date_cols].apply(to_datetime) + tm.assert_frame_equal(converted, gtnew) + + @pytest.mark.slow + def test_gold_canyon(self, banklist_data, flavor_read_html): + gc = "Gold Canyon" + with open(banklist_data, encoding="utf-8") as f: + raw_text = f.read() + + assert gc in raw_text + df = flavor_read_html( + banklist_data, match="Gold Canyon", attrs={"id": "table"} + )[0] + assert gc in df.to_string() + + def test_different_number_of_cols(self, flavor_read_html): + expected = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
C_l0_g0C_l0_g1C_l0_g2C_l0_g3C_l0_g4
R_l0_g0 0.763 0.233 nan nan nan
R_l0_g1 0.244 0.285 0.392 0.137 0.222
""" + ), + index_col=0, + )[0] + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + + + + + + + + +
C_l0_g0C_l0_g1C_l0_g2C_l0_g3C_l0_g4
R_l0_g0 0.763 0.233
R_l0_g1 0.244 0.285 0.392 0.137 0.222
""" + ), + index_col=0, + )[0] + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_1(self, flavor_read_html): + # GH17054 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + +
ABC
abc
+ """ + ) + )[0] + + expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"]) + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_copy_values(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # X x Y Z W + # A B b z C + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + +
XYZW
ABC
+ """ + ), + header=0, + )[0] + + expected = DataFrame( + data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"] + ) + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_both_not_1(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # A B b b C + # a b b b D + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
ABC
D
+ """ + ), + header=0, + )[0] + + expected = DataFrame( + data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"] + ) + + tm.assert_frame_equal(result, expected) + + def test_rowspan_at_end_of_row(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # A B + # C b + + result = flavor_read_html( + StringIO( + """ + + + + + + + + +
AB
C
+ """ + ), + header=0, + )[0] + + expected = DataFrame(data=[["C", "B"]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_rowspan_only_rows(self, flavor_read_html): + # GH17054 + + result = flavor_read_html( + StringIO( + """ + + + + + +
AB
+ """ + ), + header=0, + )[0] + + expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_header_inferred_from_rows_with_only_th(self, flavor_read_html): + # GH17054 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
AB
ab
12
+ """ + ) + )[0] + + columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]]) + expected = DataFrame(data=[[1, 2]], columns=columns) + + tm.assert_frame_equal(result, expected) + + def test_parse_dates_list(self, flavor_read_html): + df = DataFrame({"date": date_range("1/1/2001", periods=10)}) + expected = df.to_html() + res = flavor_read_html(StringIO(expected), parse_dates=[1], index_col=0) + tm.assert_frame_equal(df, res[0]) + res = flavor_read_html(StringIO(expected), parse_dates=["date"], index_col=0) + tm.assert_frame_equal(df, res[0]) + + def test_parse_dates_combine(self, flavor_read_html): + raw_dates = Series(date_range("1/1/2001", periods=10)) + df = DataFrame( + { + "date": raw_dates.map(lambda x: str(x.date())), + "time": raw_dates.map(lambda x: str(x.time())), + } + ) + res = flavor_read_html( + StringIO(df.to_html()), parse_dates={"datetime": [1, 2]}, index_col=1 + ) + newdf = DataFrame({"datetime": raw_dates}) + tm.assert_frame_equal(newdf, res[0]) + + def test_wikipedia_states_table(self, datapath, flavor_read_html): + data = datapath("io", "data", "html", "wikipedia_states.html") + assert os.path.isfile(data), f"{repr(data)} is not a file" + assert os.path.getsize(data), f"{repr(data)} is an empty file" + result = flavor_read_html(data, match="Arizona", header=1)[0] + assert result.shape == (60, 12) + assert "Unnamed" in result.columns[-1] + assert result["sq mi"].dtype == np.dtype("float64") + assert np.allclose(result.loc[0, "sq mi"], 665384.04) + + def test_wikipedia_states_multiindex(self, datapath, flavor_read_html): + data = datapath("io", "data", "html", "wikipedia_states.html") + result = flavor_read_html(data, match="Arizona", index_col=0)[0] + assert result.shape == (60, 11) + assert "Unnamed" in result.columns[-1][1] + assert result.columns.nlevels == 2 + assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04) + + def test_parser_error_on_empty_header_row(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + +
AB
ab
+ """ + ), + header=[0, 1], + ) + expected = DataFrame( + [["a", "b"]], + columns=MultiIndex.from_tuples( + [("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")] + ), + ) + tm.assert_frame_equal(result[0], expected) + + def test_decimal_rows(self, flavor_read_html): + # GH 12907 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + +
Header
1100#101
+ + """ + ), + decimal="#", + )[0] + + expected = DataFrame(data={"Header": 1100.101}, index=[0]) + + assert result["Header"].dtype == np.dtype("float64") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("arg", [True, False]) + def test_bool_header_arg(self, spam_data, arg, flavor_read_html): + # GH 6114 + msg = re.escape( + "Passing a bool to header is invalid. Use header=None for no header or " + "header=int or list-like of ints to specify the row(s) making up the " + "column names" + ) + with pytest.raises(TypeError, match=msg): + flavor_read_html(spam_data, header=arg) + + def test_converters(self, flavor_read_html): + # GH 13461 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
a
0.763
0.244
""" + ), + converters={"a": str}, + )[0] + + expected = DataFrame({"a": ["0.763", "0.244"]}) + + tm.assert_frame_equal(result, expected) + + def test_na_values(self, flavor_read_html): + # GH 13461 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
a
0.763
0.244
""" + ), + na_values=[0.244], + )[0] + + expected = DataFrame({"a": [0.763, np.nan]}) + + tm.assert_frame_equal(result, expected) + + def test_keep_default_na(self, flavor_read_html): + html_data = """ + + + + + + + + + + + + + +
a
N/A
NA
""" + + expected_df = DataFrame({"a": ["N/A", "NA"]}) + html_df = flavor_read_html(StringIO(html_data), keep_default_na=False)[0] + tm.assert_frame_equal(expected_df, html_df) + + expected_df = DataFrame({"a": [np.nan, np.nan]}) + html_df = flavor_read_html(StringIO(html_data), keep_default_na=True)[0] + tm.assert_frame_equal(expected_df, html_df) + + def test_preserve_empty_rows(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
AB
ab
+ """ + ) + )[0] + + expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_ignore_empty_rows_when_inferring_header(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
AB
ab
12
+ """ + ) + )[0] + + columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]]) + expected = DataFrame(data=[[1, 2]], columns=columns) + + tm.assert_frame_equal(result, expected) + + def test_multiple_header_rows(self, flavor_read_html): + # Issue #13434 + expected_df = DataFrame( + data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")] + ) + expected_df.columns = [ + ["Unnamed: 0_level_0", "Age", "Party"], + ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"], + ] + html = expected_df.to_html(index=False) + html_df = flavor_read_html(StringIO(html))[0] + tm.assert_frame_equal(expected_df, html_df) + + def test_works_on_valid_markup(self, datapath, flavor_read_html): + filename = datapath("io", "data", "html", "valid_markup.html") + dfs = flavor_read_html(filename, index_col=0) + assert isinstance(dfs, list) + assert isinstance(dfs[0], DataFrame) + + @pytest.mark.slow + def test_fallback_success(self, datapath, flavor_read_html): + banklist_data = datapath("io", "data", "html", "banklist.html") + + flavor_read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"]) + + def test_to_html_timestamp(self): + rng = date_range("2000-01-01", periods=10) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=rng) + + result = df.to_html() + assert "2000-01-01" in result + + def test_to_html_borderless(self): + df = DataFrame([{"A": 1, "B": 2}]) + out_border_default = df.to_html() + out_border_true = df.to_html(border=True) + out_border_explicit_default = df.to_html(border=1) + out_border_nondefault = df.to_html(border=2) + out_border_zero = df.to_html(border=0) + + out_border_false = df.to_html(border=False) + + assert ' border="1"' in out_border_default + assert out_border_true == out_border_default + assert out_border_default == out_border_explicit_default + assert out_border_default != out_border_nondefault + assert ' border="2"' in out_border_nondefault + assert ' border="0"' not in out_border_zero + assert " border" not in out_border_false + assert out_border_zero == out_border_false + + @pytest.mark.parametrize( + "displayed_only,exp0,exp1", + [ + (True, DataFrame(["foo"]), None), + (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])), + ], + ) + def test_displayed_only(self, displayed_only, exp0, exp1, flavor_read_html): + # GH 20027 + data = """ + + + + + +
+ foo + bar + baz + qux +
+ + + + +
foo
+ + """ + + dfs = flavor_read_html(StringIO(data), displayed_only=displayed_only) + tm.assert_frame_equal(dfs[0], exp0) + + if exp1 is not None: + tm.assert_frame_equal(dfs[1], exp1) + else: + assert len(dfs) == 1 # Should not parse hidden table + + @pytest.mark.parametrize("displayed_only", [True, False]) + def test_displayed_only_with_many_elements(self, displayed_only, flavor_read_html): + html_table = """ + + + + + + + + + + + + + +
AB
12
45
+ """ + result = flavor_read_html(StringIO(html_table), displayed_only=displayed_only)[ + 0 + ] + expected = DataFrame({"A": [1, 4], "B": [2, 5]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:You provided Unicode markup but also provided a value for " + "from_encoding.*:UserWarning" + ) + def test_encode(self, html_encoding_file, flavor_read_html): + base_path = os.path.basename(html_encoding_file) + root = os.path.splitext(base_path)[0] + _, encoding = root.split("_") + + try: + with open(html_encoding_file, "rb") as fobj: + from_string = flavor_read_html( + fobj.read(), encoding=encoding, index_col=0 + ).pop() + + with open(html_encoding_file, "rb") as fobj: + from_file_like = flavor_read_html( + BytesIO(fobj.read()), encoding=encoding, index_col=0 + ).pop() + + from_filename = flavor_read_html( + html_encoding_file, encoding=encoding, index_col=0 + ).pop() + tm.assert_frame_equal(from_string, from_file_like) + tm.assert_frame_equal(from_string, from_filename) + except Exception: + # seems utf-16/32 fail on windows + if is_platform_windows(): + if "16" in encoding or "32" in encoding: + pytest.skip() + raise + + def test_parse_failure_unseekable(self, flavor_read_html): + # Issue #17975 + + if flavor_read_html.keywords.get("flavor") == "lxml": + pytest.skip("Not applicable for lxml") + + class UnseekableStringIO(StringIO): + def seekable(self): + return False + + bad = UnseekableStringIO( + """ +
<table><tr><td>spam<foobr />eggs</td></tr></table>
""" + ) + + assert flavor_read_html(bad) + + with pytest.raises(ValueError, match="passed a non-rewindable file object"): + flavor_read_html(bad) + + def test_parse_failure_rewinds(self, flavor_read_html): + # Issue #17975 + + class MockFile: + def __init__(self, data) -> None: + self.data = data + self.at_end = False + + def read(self, size=None): + data = "" if self.at_end else self.data + self.at_end = True + return data + + def seek(self, offset): + self.at_end = False + + def seekable(self): + return True + + # GH 49036 pylint checks for presence of __next__ for iterators + def __next__(self): + ... + + def __iter__(self) -> Iterator: + # `is_file_like` depends on the presence of + # the __iter__ attribute. + return self + + good = MockFile("
<table><tr><td>spam<br />eggs</td></tr></table>
") + bad = MockFile("
<table><tr><td>spam<foobr />eggs</td></tr></table>
") + + assert flavor_read_html(good) + assert flavor_read_html(bad) + + @pytest.mark.slow + @pytest.mark.single_cpu + def test_importcheck_thread_safety(self, datapath, flavor_read_html): + # see gh-16928 + + class ErrorThread(threading.Thread): + def run(self): + try: + super().run() + except Exception as err: + self.err = err + else: + self.err = None + + filename = datapath("io", "data", "html", "valid_markup.html") + helper_thread1 = ErrorThread(target=flavor_read_html, args=(filename,)) + helper_thread2 = ErrorThread(target=flavor_read_html, args=(filename,)) + + helper_thread1.start() + helper_thread2.start() + + while helper_thread1.is_alive() or helper_thread2.is_alive(): + pass + assert None is helper_thread1.err is helper_thread2.err + + def test_parse_path_object(self, datapath, flavor_read_html): + # GH 37705 + file_path_string = datapath("io", "data", "html", "spam.html") + file_path = Path(file_path_string) + df1 = flavor_read_html(file_path_string)[0] + df2 = flavor_read_html(file_path)[0] + tm.assert_frame_equal(df1, df2) + + def test_parse_br_as_space(self, flavor_read_html): + # GH 29528: pd.read_html() convert
to space + result = flavor_read_html( + StringIO( + """ + + + + + + + +
A
word1
word2
+ """ + ) + )[0] + + expected = DataFrame(data=[["word1 word2"]], columns=["A"]) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("arg", ["all", "body", "header", "footer"]) + def test_extract_links(self, arg, flavor_read_html): + gh_13141_data = """ + + + + + + + + + + + + + + + + + +
HTTPFTPLinkless
WikipediaSURROUNDING Debian TEXTLinkless
Footer + Multiple links: Only first captured. +
+ """ + + gh_13141_expected = { + "head_ignore": ["HTTP", "FTP", "Linkless"], + "head_extract": [ + ("HTTP", None), + ("FTP", None), + ("Linkless", "https://en.wiktionary.org/wiki/linkless"), + ], + "body_ignore": ["Wikipedia", "SURROUNDING Debian TEXT", "Linkless"], + "body_extract": [ + ("Wikipedia", "https://en.wikipedia.org/"), + ("SURROUNDING Debian TEXT", "ftp://ftp.us.debian.org/"), + ("Linkless", None), + ], + "footer_ignore": [ + "Footer", + "Multiple links: Only first captured.", + None, + ], + "footer_extract": [ + ("Footer", "https://en.wikipedia.org/wiki/Page_footer"), + ("Multiple links: Only first captured.", "1"), + None, + ], + } + + data_exp = gh_13141_expected["body_ignore"] + foot_exp = gh_13141_expected["footer_ignore"] + head_exp = gh_13141_expected["head_ignore"] + if arg == "all": + data_exp = gh_13141_expected["body_extract"] + foot_exp = gh_13141_expected["footer_extract"] + head_exp = gh_13141_expected["head_extract"] + elif arg == "body": + data_exp = gh_13141_expected["body_extract"] + elif arg == "footer": + foot_exp = gh_13141_expected["footer_extract"] + elif arg == "header": + head_exp = gh_13141_expected["head_extract"] + + result = flavor_read_html(StringIO(gh_13141_data), extract_links=arg)[0] + expected = DataFrame([data_exp, foot_exp], columns=head_exp) + expected = expected.fillna(np.nan) + tm.assert_frame_equal(result, expected) + + def test_extract_links_bad(self, spam_data): + msg = ( + "`extract_links` must be one of " + '{None, "header", "footer", "body", "all"}, got "incorrect"' + ) + with pytest.raises(ValueError, match=msg): + read_html(spam_data, extract_links="incorrect") + + def test_extract_links_all_no_header(self, flavor_read_html): + # GH 48316 + data = """ + + + + +
+ Google.com +
+ """ + result = flavor_read_html(StringIO(data), extract_links="all")[0] + expected = DataFrame([[("Google.com", "https://google.com")]]) + tm.assert_frame_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_html("test", dtype_backend="numpy") + + def test_style_tag(self, flavor_read_html): + # GH 48316 + data = """ + + + + + + + + + + + + + +
+ + A + B
A1B1
A2B2
+ """ + result = flavor_read_html(StringIO(data))[0] + expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca11ad1f74e6381e389577e821d37d89cc689db --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py @@ -0,0 +1,172 @@ +""" +Tests for the pandas custom headers in http(s) requests +""" +from functools import partial +import gzip +from io import BytesIO + +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + +pytestmark = [ + pytest.mark.single_cpu, + pytest.mark.network, + pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" + ), +] + + +def gzip_bytes(response_bytes): + with BytesIO() as bio: + with gzip.GzipFile(fileobj=bio, mode="w") as zipper: + zipper.write(response_bytes) + return bio.getvalue() + + +def csv_responder(df): + return df.to_csv(index=False).encode("utf-8") + + +def gz_csv_responder(df): + return gzip_bytes(csv_responder(df)) + + +def json_responder(df): + return df.to_json().encode("utf-8") + + +def gz_json_responder(df): + return gzip_bytes(json_responder(df)) + + +def html_responder(df): + return df.to_html(index=False).encode("utf-8") + + +def parquetpyarrow_reponder(df): + return df.to_parquet(index=False, engine="pyarrow") + + +def parquetfastparquet_responder(df): + # the fastparquet engine doesn't like to write to a buffer + # it can do it via the open_with function being set appropriately + # however it automatically calls the close method and wipes the buffer + # so just overwrite that attribute on this instance to not do that + + # protected by an importorskip in the respective test + import fsspec + + df.to_parquet( + "memory://fastparquet_user_agent.parquet", + index=False, + engine="fastparquet", + compression=None, + ) + with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f: + return f.read() + + +def pickle_respnder(df): + with BytesIO() as bio: + df.to_pickle(bio) + return bio.getvalue() + + +def stata_responder(df): + with BytesIO() as bio: + df.to_stata(bio, write_index=False) + return bio.getvalue() + + +@pytest.mark.parametrize( + "responder, read_method", + [ + (csv_responder, pd.read_csv), + (json_responder, pd.read_json), + ( + html_responder, + lambda *args, **kwargs: pd.read_html(*args, **kwargs)[0], + ), + pytest.param( + parquetpyarrow_reponder, + partial(pd.read_parquet, engine="pyarrow"), + marks=td.skip_if_no("pyarrow"), + ), + pytest.param( + parquetfastparquet_responder, + partial(pd.read_parquet, engine="fastparquet"), + # TODO(ArrayManager) fastparquet + marks=[ + td.skip_if_no("fastparquet"), + td.skip_if_no("fsspec"), + td.skip_array_manager_not_yet_implemented, + ], + ), + (pickle_respnder, pd.read_pickle), + (stata_responder, pd.read_stata), + (gz_csv_responder, pd.read_csv), + (gz_json_responder, pd.read_json), + ], +) +@pytest.mark.parametrize( + "storage_options", + [ + None, + {"User-Agent": "foo"}, + {"User-Agent": "foo", "Auth": "bar"}, + ], +) +def test_request_headers(responder, read_method, httpserver, storage_options): + expected = pd.DataFrame({"a": ["b"]}) + default_headers = ["Accept-Encoding", "Host", "Connection", "User-Agent"] + if "gz" in responder.__name__: + extra = 
{"Content-Encoding": "gzip"} + if storage_options is None: + storage_options = extra + else: + storage_options |= extra + else: + extra = None + expected_headers = set(default_headers).union( + storage_options.keys() if storage_options else [] + ) + httpserver.serve_content(content=responder(expected), headers=extra) + result = read_method(httpserver.url, storage_options=storage_options) + tm.assert_frame_equal(result, expected) + + request_headers = dict(httpserver.requests[0].headers) + for header in expected_headers: + exp = request_headers.pop(header) + if storage_options and header in storage_options: + assert exp == storage_options[header] + # No extra headers added + assert not request_headers + + +@pytest.mark.parametrize( + "engine", + [ + "pyarrow", + "fastparquet", + ], +) +def test_to_parquet_to_disk_with_storage_options(engine): + headers = { + "User-Agent": "custom", + "Auth": "other_custom", + } + + pytest.importorskip(engine) + + true_df = pd.DataFrame({"column_name": ["column_value"]}) + msg = ( + "storage_options passed with file object or non-fsspec file path|" + "storage_options passed with buffer, or non-supported URL" + ) + with pytest.raises(ValueError, match=msg): + true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_orc.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_orc.py new file mode 100644 index 0000000000000000000000000000000000000000..a4021311fc963a41633ebec2680c7f6d79525044 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_orc.py @@ -0,0 +1,436 @@ +""" test orc compat """ +import datetime +from decimal import Decimal +from io import BytesIO +import os +import pathlib + +import numpy as np +import pytest + +import pandas as pd +from pandas import read_orc +import pandas._testing as tm +from pandas.core.arrays import StringArray + +pytest.importorskip("pyarrow.orc") + +import pyarrow as pa + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def dirpath(datapath): + return datapath("io", "data", "orc") + + +@pytest.fixture( + params=[ + np.array([1, 20], dtype="uint64"), + pd.Series(["a", "b", "a"], dtype="category"), + [pd.Interval(left=0, right=2), pd.Interval(left=0, right=5)], + [pd.Period("2022-01-03", freq="D"), pd.Period("2022-01-04", freq="D")], + ] +) +def orc_writer_dtypes_not_supported(request): + # Examples of dataframes with dtypes for which conversion to ORC + # hasn't been implemented yet, that is, Category, unsigned integers, + # interval, period and sparse. 
+ return pd.DataFrame({"unimpl": request.param}) + + +def test_orc_reader_empty(dirpath): + columns = [ + "boolean1", + "byte1", + "short1", + "int1", + "long1", + "float1", + "double1", + "bytes1", + "string1", + ] + dtypes = [ + "bool", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "object", + "object", + ] + expected = pd.DataFrame(index=pd.RangeIndex(0)) + for colname, dtype in zip(columns, dtypes): + expected[colname] = pd.Series(dtype=dtype) + + inputfile = os.path.join(dirpath, "TestOrcFile.emptyFile.orc") + got = read_orc(inputfile, columns=columns) + + tm.assert_equal(expected, got) + + +def test_orc_reader_basic(dirpath): + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.test1.orc") + got = read_orc(inputfile, columns=data.keys()) + + tm.assert_equal(expected, got) + + +def test_orc_reader_decimal(dirpath): + # Only testing the first 10 rows of data + data = { + "_col0": np.array( + [ + Decimal("-1000.50000"), + Decimal("-999.60000"), + Decimal("-998.70000"), + Decimal("-997.80000"), + Decimal("-996.90000"), + Decimal("-995.10000"), + Decimal("-994.11000"), + Decimal("-993.12000"), + Decimal("-992.13000"), + Decimal("-991.14000"), + ], + dtype="object", + ) + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.decimal.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_date_low(dirpath): + data = { + "time": np.array( + [ + "1900-05-05 12:34:56.100000", + "1900-05-05 12:34:56.100100", + "1900-05-05 12:34:56.100200", + "1900-05-05 12:34:56.100300", + "1900-05-05 12:34:56.100400", + "1900-05-05 12:34:56.100500", + "1900-05-05 12:34:56.100600", + "1900-05-05 12:34:56.100700", + "1900-05-05 12:34:56.100800", + "1900-05-05 12:34:56.100900", + ], + dtype="datetime64[ns]", + ), + "date": np.array( + [ + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testDate1900.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_date_high(dirpath): + data = { + "time": np.array( + [ + "2038-05-05 12:34:56.100000", + "2038-05-05 12:34:56.100100", + "2038-05-05 12:34:56.100200", + "2038-05-05 12:34:56.100300", + "2038-05-05 12:34:56.100400", + "2038-05-05 12:34:56.100500", + "2038-05-05 12:34:56.100600", + "2038-05-05 12:34:56.100700", + "2038-05-05 12:34:56.100800", + "2038-05-05 12:34:56.100900", + ], + dtype="datetime64[ns]", + ), + "date": np.array( + [ + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 
12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testDate2038.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_snappy_compressed(dirpath): + data = { + "int1": np.array( + [ + -1160101563, + 1181413113, + 2065821249, + -267157795, + 172111193, + 1752363137, + 1406072123, + 1911809390, + -1308542224, + -467100286, + ], + dtype="int32", + ), + "string1": np.array( + [ + "f50dcb8", + "382fdaaa", + "90758c6", + "9e8caf3f", + "ee97332b", + "d634da1", + "2bea4396", + "d67d89e8", + "ad71007e", + "e8c82066", + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testSnappy.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_roundtrip_file(dirpath): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + with tm.ensure_clean() as path: + expected.to_orc(path) + got = read_orc(path) + + tm.assert_equal(expected, got) + + +def test_orc_roundtrip_bytesio(): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + bytes = expected.to_orc() + got = read_orc(BytesIO(bytes)) + + tm.assert_equal(expected, got) + + +def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + msg = "The dtype of one or more columns is not supported yet." 
+ with pytest.raises(NotImplementedError, match=msg): + orc_writer_dtypes_not_supported.to_orc() + + +def test_orc_dtype_backend_pyarrow(): + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "string": list("abc"), + "string_with_nan": ["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "bytes": [b"foo", b"bar", None], + "int": list(range(1, 4)), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "bool_with_na": [True, False, None], + "datetime": pd.date_range("20130101", periods=3), + "datetime_with_nat": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + } + ) + + bytes_data = df.copy().to_orc() + result = read_orc(BytesIO(bytes_data), dtype_backend="pyarrow") + + expected = pd.DataFrame( + { + col: pd.arrays.ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + + tm.assert_frame_equal(result, expected) + + +def test_orc_dtype_backend_numpy_nullable(): + # GH#50503 + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "string": list("abc"), + "string_with_nan": ["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "int": list(range(1, 4)), + "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"), + "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "bool_with_na": [True, False, None], + } + ) + + bytes_data = df.copy().to_orc() + result = read_orc(BytesIO(bytes_data), dtype_backend="numpy_nullable") + + expected = pd.DataFrame( + { + "string": StringArray(np.array(["a", "b", "c"], dtype=np.object_)), + "string_with_nan": StringArray( + np.array(["a", pd.NA, "c"], dtype=np.object_) + ), + "string_with_none": StringArray( + np.array(["a", pd.NA, "c"], dtype=np.object_) + ), + "int": pd.Series([1, 2, 3], dtype="Int64"), + "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"), + "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"), + "float": pd.Series([4.0, 5.0, 6.0], dtype="Float64"), + "float_with_nan": pd.Series([2.0, pd.NA, 3.0], dtype="Float64"), + "bool": pd.Series([True, False, True], dtype="boolean"), + "bool_with_na": pd.Series([True, False, pd.NA], dtype="boolean"), + } + ) + + tm.assert_frame_equal(result, expected) + + +def test_orc_uri_path(): + expected = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.orc") as path: + expected.to_orc(path) + uri = pathlib.Path(path).as_uri() + result = read_orc(uri) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "index", + [ + pd.RangeIndex(start=2, stop=5, step=1), + pd.RangeIndex(start=0, stop=3, step=1, name="non-default"), + pd.Index([1, 2, 3]), + ], +) +def test_to_orc_non_default_index(index): + df = pd.DataFrame({"a": [1, 2, 3]}, index=index) + msg = ( + "orc does not support serializing a non-default index|" + "orc does not serialize index meta-data" + ) + with pytest.raises(ValueError, match=msg): + df.to_orc() + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.orc") as path: + df.to_orc(path) + with pytest.raises(ValueError, match=msg): + read_orc(path, dtype_backend="numpy") + + +def test_string_inference(tmp_path): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}) + df.to_orc(path) + with pd.option_context("future.infer_string", True): + result = read_orc(path) + expected = pd.DataFrame( + data={"a": ["x", "y"]}, + dtype="string[pyarrow_numpy]", + columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b94177eedb20d9e5c2ae354006a2b7d8a2b42d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py @@ -0,0 +1,1424 @@ +""" test parquet compat """ +import datetime +from decimal import Decimal +from io import BytesIO +import os +import pathlib + +import numpy as np +import pytest + +from pandas._config import using_copy_on_write +from pandas._config.config import _get_option + +from pandas.compat import is_platform_windows +from pandas.compat.pyarrow import ( + pa_version_under11p0, + pa_version_under13p0, + pa_version_under15p0, +) + +import pandas as pd +import pandas._testing as tm +from pandas.util.version import Version + +from pandas.io.parquet import ( + FastParquetImpl, + PyArrowImpl, + get_engine, + read_parquet, + to_parquet, +) + +try: + import pyarrow + + _HAVE_PYARROW = True +except ImportError: + _HAVE_PYARROW = False + +try: + import fastparquet + + _HAVE_FASTPARQUET = True +except ImportError: + _HAVE_FASTPARQUET = False + + +# TODO(ArrayManager) fastparquet relies on BlockManager internals + +pytestmark = [ + pytest.mark.filterwarnings("ignore:DataFrame._data is deprecated:FutureWarning"), + pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" + ), +] + + +# setup engines & skips +@pytest.fixture( + params=[ + pytest.param( + "fastparquet", + marks=pytest.mark.skipif( + not _HAVE_FASTPARQUET + or _get_option("mode.data_manager", silent=True) == "array", + reason="fastparquet is not installed or ArrayManager is used", + ), + ), + pytest.param( + "pyarrow", + marks=pytest.mark.skipif( + not _HAVE_PYARROW, reason="pyarrow is not installed" + ), + ), + ] +) +def engine(request): + return request.param + + +@pytest.fixture +def pa(): + if not _HAVE_PYARROW: + pytest.skip("pyarrow is not installed") + return "pyarrow" + + +@pytest.fixture +def fp(): + if not _HAVE_FASTPARQUET: + pytest.skip("fastparquet is not installed") + elif _get_option("mode.data_manager", silent=True) == "array": + pytest.skip("ArrayManager is not supported with fastparquet") + return "fastparquet" + + +@pytest.fixture +def df_compat(): + return pd.DataFrame({"A": [1, 2, 3], "B": "foo"}) + + +@pytest.fixture +def df_cross_compat(): + df = pd.DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + # 'c': np.arange(3, 6).astype('u1'), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("20130101", periods=3), + # 'g': pd.date_range('20130101', periods=3, + # tz='US/Eastern'), + # 'h': pd.date_range('20130101', periods=3, freq='ns') + } + ) + return df + + +@pytest.fixture +def df_full(): + return pd.DataFrame( + { + "string": list("abc"), + "string_with_nan": 
["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "bytes": [b"foo", b"bar", b"baz"], + "unicode": ["foo", "bar", "baz"], + "int": list(range(1, 4)), + "uint": np.arange(3, 6).astype("u1"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "datetime": pd.date_range("20130101", periods=3), + "datetime_with_nat": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + } + ) + + +@pytest.fixture( + params=[ + datetime.datetime.now(datetime.timezone.utc), + datetime.datetime.now(datetime.timezone.min), + datetime.datetime.now(datetime.timezone.max), + datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"), + ] +) +def timezone_aware_date_list(request): + return request.param + + +def check_round_trip( + df, + engine=None, + path=None, + write_kwargs=None, + read_kwargs=None, + expected=None, + check_names=True, + check_like=False, + check_dtype=True, + repeat=2, +): + """Verify parquet serializer and deserializer produce the same results. + + Performs a pandas to disk and disk to pandas round trip, + then compares the 2 resulting DataFrames to verify equality. + + Parameters + ---------- + df: Dataframe + engine: str, optional + 'pyarrow' or 'fastparquet' + path: str, optional + write_kwargs: dict of str:str, optional + read_kwargs: dict of str:str, optional + expected: DataFrame, optional + Expected deserialization result, otherwise will be equal to `df` + check_names: list of str, optional + Closed set of column names to be compared + check_like: bool, optional + If True, ignore the order of index & columns. + repeat: int, optional + How many times to repeat the test + """ + write_kwargs = write_kwargs or {"compression": None} + read_kwargs = read_kwargs or {} + + if expected is None: + expected = df + + if engine: + write_kwargs["engine"] = engine + read_kwargs["engine"] = engine + + def compare(repeat): + for _ in range(repeat): + df.to_parquet(path, **write_kwargs) + actual = read_parquet(path, **read_kwargs) + + if "string_with_nan" in expected: + expected.loc[1, "string_with_nan"] = None + tm.assert_frame_equal( + expected, + actual, + check_names=check_names, + check_like=check_like, + check_dtype=check_dtype, + ) + + if path is None: + with tm.ensure_clean() as path: + compare(repeat) + else: + compare(repeat) + + +def check_partition_names(path, expected): + """Check partitions of a parquet file are as expected. + + Parameters + ---------- + path: str + Path of the dataset. + expected: iterable of str + Expected partition names. 
+ """ + import pyarrow.dataset as ds + + dataset = ds.dataset(path, partitioning="hive") + assert dataset.partitioning.schema.names == expected + + +def test_invalid_engine(df_compat): + msg = "engine must be one of 'pyarrow', 'fastparquet'" + with pytest.raises(ValueError, match=msg): + check_round_trip(df_compat, "foo", "bar") + + +def test_options_py(df_compat, pa): + # use the set option + + with pd.option_context("io.parquet.engine", "pyarrow"): + check_round_trip(df_compat) + + +def test_options_fp(df_compat, fp): + # use the set option + + with pd.option_context("io.parquet.engine", "fastparquet"): + check_round_trip(df_compat) + + +def test_options_auto(df_compat, fp, pa): + # use the set option + + with pd.option_context("io.parquet.engine", "auto"): + check_round_trip(df_compat) + + +def test_options_get_engine(fp, pa): + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "pyarrow"): + assert isinstance(get_engine("auto"), PyArrowImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "fastparquet"): + assert isinstance(get_engine("auto"), FastParquetImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "auto"): + assert isinstance(get_engine("auto"), PyArrowImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. are installed but bad version + from pandas.compat._optional import VERSIONS + + # Do we have engines installed, but a bad version of them? + pa_min_ver = VERSIONS.get("pyarrow") + fp_min_ver = VERSIONS.get("fastparquet") + have_pa_bad_version = ( + False + if not _HAVE_PYARROW + else Version(pyarrow.__version__) < Version(pa_min_ver) + ) + have_fp_bad_version = ( + False + if not _HAVE_FASTPARQUET + else Version(fastparquet.__version__) < Version(fp_min_ver) + ) + # Do we have usable engines installed? + have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version + have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version + + if not have_usable_pa and not have_usable_fp: + # No usable engines found. + if have_pa_bad_version: + match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + + if have_fp_bad_version: + match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .fastparquet." 
+ with pytest.raises(ImportError, match=match): + get_engine("auto") + + +def test_cross_engine_pa_fp(df_cross_compat, pa, fp): + # cross-compat with differing reading/writing engines + + df = df_cross_compat + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa, compression=None) + + result = read_parquet(path, engine=fp) + tm.assert_frame_equal(result, df) + + result = read_parquet(path, engine=fp, columns=["a", "d"]) + tm.assert_frame_equal(result, df[["a", "d"]]) + + +def test_cross_engine_fp_pa(df_cross_compat, pa, fp): + # cross-compat with differing reading/writing engines + df = df_cross_compat + with tm.ensure_clean() as path: + df.to_parquet(path, engine=fp, compression=None) + + result = read_parquet(path, engine=pa) + tm.assert_frame_equal(result, df) + + result = read_parquet(path, engine=pa, columns=["a", "d"]) + tm.assert_frame_equal(result, df[["a", "d"]]) + + +def test_parquet_pos_args_deprecation(engine): + # GH-54229 + df = pd.DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_parquet except for the " + r"argument 'path' will be keyword-only." + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + df.to_parquet(path, engine) + + +class Base: + def check_error_on_write(self, df, engine, exc, err_msg): + # check that we are raising the exception on writing + with tm.ensure_clean() as path: + with pytest.raises(exc, match=err_msg): + to_parquet(df, path, engine, compression=None) + + def check_external_error_on_write(self, df, engine, exc): + # check that an external library is raising the exception on writing + with tm.ensure_clean() as path: + with tm.external_error_raised(exc): + to_parquet(df, path, engine, compression=None) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine): + if engine != "auto": + pytest.importorskip(engine) + with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f: + httpserver.serve_content(content=f.read()) + df = read_parquet(httpserver.url) + tm.assert_frame_equal(df, df_compat) + + +class TestBasic(Base): + def test_error(self, engine): + for obj in [ + pd.Series([1, 2, 3]), + 1, + "foo", + pd.Timestamp("20130101"), + np.array([1, 2, 3]), + ]: + msg = "to_parquet only supports IO with DataFrames" + self.check_error_on_write(obj, engine, ValueError, msg) + + def test_columns_dtypes(self, engine): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + # unicode + df.columns = ["foo", "bar"] + check_round_trip(df, engine) + + @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"]) + def test_compression(self, engine, compression): + df = pd.DataFrame({"A": [1, 2, 3]}) + check_round_trip(df, engine, write_kwargs={"compression": compression}) + + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + expected = pd.DataFrame({"string": list("abc")}) + check_round_trip( + df, engine, expected=expected, read_kwargs={"columns": ["string"]} + ) + + def test_read_filters(self, engine, tmp_path): + df = pd.DataFrame( + { + "int": list(range(4)), + "part": list("aabb"), + } + ) + + expected = pd.DataFrame({"int": [0, 1]}) + check_round_trip( + df, + engine, + path=tmp_path, + expected=expected, + write_kwargs={"partition_cols": ["part"]}, + read_kwargs={"filters": [("part", "==", 
"a")], "columns": ["int"]}, + repeat=1, + ) + + def test_write_index(self, engine, using_copy_on_write, request): + check_names = engine != "fastparquet" + if using_copy_on_write and engine == "fastparquet": + request.applymarker( + pytest.mark.xfail(reason="fastparquet write into index") + ) + + df = pd.DataFrame({"A": [1, 2, 3]}) + check_round_trip(df, engine) + + indexes = [ + [2, 3, 4], + pd.date_range("20130101", periods=3), + list("abc"), + [1, 3, 4], + ] + # non-default index + for index in indexes: + df.index = index + if isinstance(index, pd.DatetimeIndex): + df.index = df.index._with_freq(None) # freq doesn't round-trip + check_round_trip(df, engine, check_names=check_names) + + # index with meta-data + df.index = [0, 1, 2] + df.index.name = "foo" + check_round_trip(df, engine) + + def test_write_multiindex(self, pa): + # Not supported in fastparquet as of 0.1.3 or older pyarrow version + engine = pa + + df = pd.DataFrame({"A": [1, 2, 3]}) + index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]) + df.index = index + check_round_trip(df, engine) + + def test_multiindex_with_columns(self, pa): + engine = pa + dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS") + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((2 * len(dates), 3)), + columns=list("ABC"), + ) + index1 = pd.MultiIndex.from_product( + [["Level1", "Level2"], dates], names=["level", "date"] + ) + index2 = index1.copy(names=None) + for index in [index1, index2]: + df.index = index + + check_round_trip(df, engine) + check_round_trip( + df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]] + ) + + def test_write_ignoring_index(self, engine): + # ENH 20768 + # Ensure index=False omits the index from the written Parquet file. + df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]}) + + write_kwargs = {"compression": None, "index": False} + + # Because we're dropping the index, we expect the loaded dataframe to + # have the default integer index. + expected = df.reset_index(drop=True) + + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + # Ignore custom index + df = pd.DataFrame( + {"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"] + ) + + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + # Ignore multi-indexes as well. + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + df = pd.DataFrame( + {"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays + ) + + expected = df.reset_index(drop=True) + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + def test_write_column_multiindex(self, engine): + # Not able to write column multi-indexes with non-string column names. 
+ mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]) + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=mi_columns + ) + + if engine == "fastparquet": + self.check_error_on_write( + df, engine, TypeError, "Column name must be a string" + ) + elif engine == "pyarrow": + check_round_trip(df, engine) + + def test_write_column_multiindex_nonstring(self, engine): + # GH #34777 + + # Not able to write column multi-indexes with non-string column names + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + [1, 2, 1, 2, 1, 2, 1, 2], + ] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 8)), columns=arrays + ) + df.columns.names = ["Level1", "Level2"] + if engine == "fastparquet": + self.check_error_on_write(df, engine, ValueError, "Column name") + elif engine == "pyarrow": + check_round_trip(df, engine) + + def test_write_column_multiindex_string(self, pa): + # GH #34777 + # Not supported in fastparquet as of 0.1.3 + engine = pa + + # Write column multi-indexes with string column names + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 8)), columns=arrays + ) + df.columns.names = ["ColLevel1", "ColLevel2"] + + check_round_trip(df, engine) + + def test_write_column_index_string(self, pa): + # GH #34777 + # Not supported in fastparquet as of 0.1.3 + engine = pa + + # Write column indexes with string column names + arrays = ["bar", "baz", "foo", "qux"] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), columns=arrays + ) + df.columns.name = "StringCol" + + check_round_trip(df, engine) + + def test_write_column_index_nonstring(self, engine): + # GH #34777 + + # Write column indexes with string column names + arrays = [1, 2, 3, 4] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), columns=arrays + ) + df.columns.name = "NonStringCol" + if engine == "fastparquet": + self.check_error_on_write( + df, engine, TypeError, "Column name must be a string" + ) + else: + check_round_trip(df, engine) + + def test_dtype_backend(self, engine, request): + pq = pytest.importorskip("pyarrow.parquet") + + if engine == "fastparquet": + # We are manually disabling fastparquet's + # nullable dtype support pending discussion + mark = pytest.mark.xfail( + reason="Fastparquet nullable dtype support is disabled" + ) + request.applymarker(mark) + + table = pyarrow.table( + { + "a": pyarrow.array([1, 2, 3, None], "int64"), + "b": pyarrow.array([1, 2, 3, None], "uint8"), + "c": pyarrow.array(["a", "b", "c", None]), + "d": pyarrow.array([True, False, True, None]), + # Test that nullable dtypes used even in absence of nulls + "e": pyarrow.array([1, 2, 3, 4], "int64"), + # GH 45694 + "f": pyarrow.array([1.0, 2.0, 3.0, None], "float32"), + "g": pyarrow.array([1.0, 2.0, 3.0, None], "float64"), + } + ) + with tm.ensure_clean() as path: + # write manually with pyarrow to write integers + pq.write_table(table, path) + result1 = read_parquet(path, engine=engine) + result2 = read_parquet(path, engine=engine, dtype_backend="numpy_nullable") + + assert result1["a"].dtype == np.dtype("float64") + expected = pd.DataFrame( + { + "a": pd.array([1, 2, 3, None], dtype="Int64"), + "b": pd.array([1, 2, 3, None], dtype="UInt8"), + "c": pd.array(["a", "b", "c", None], dtype="string"), + "d": pd.array([True, False, True, None], dtype="boolean"), + "e": pd.array([1, 2, 3, 
4], dtype="Int64"), + "f": pd.array([1.0, 2.0, 3.0, None], dtype="Float32"), + "g": pd.array([1.0, 2.0, 3.0, None], dtype="Float64"), + } + ) + if engine == "fastparquet": + # Fastparquet doesn't support string columns yet + # Only int and boolean + result2 = result2.drop("c", axis=1) + expected = expected.drop("c", axis=1) + tm.assert_frame_equal(result2, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "Int64", + "UInt8", + "boolean", + "object", + "datetime64[ns, UTC]", + "float", + "period[D]", + "Float64", + "string", + ], + ) + def test_read_empty_array(self, pa, dtype): + # GH #41241 + df = pd.DataFrame( + { + "value": pd.array([], dtype=dtype), + } + ) + # GH 45694 + expected = None + if dtype == "float": + expected = pd.DataFrame( + { + "value": pd.array([], dtype="Float64"), + } + ) + check_round_trip( + df, pa, read_kwargs={"dtype_backend": "numpy_nullable"}, expected=expected + ) + + +class TestParquetPyArrow(Base): + def test_basic(self, pa, df_full): + df = df_full + + # additional supported types for pyarrow + dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["bool_with_none"] = [True, None, True] + + check_round_trip(df, pa) + + def test_basic_subset_columns(self, pa, df_full): + # GH18628 + + df = df_full + # additional supported types for pyarrow + df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + + check_round_trip( + df, + pa, + expected=df[["string", "int"]], + read_kwargs={"columns": ["string", "int"]}, + ) + + def test_to_bytes_without_path_or_buf_provided(self, pa, df_full): + # GH 37105 + buf_bytes = df_full.to_parquet(engine=pa) + assert isinstance(buf_bytes, bytes) + + buf_stream = BytesIO(buf_bytes) + res = read_parquet(buf_stream) + + expected = df_full.copy() + expected.loc[1, "string_with_nan"] = None + tm.assert_frame_equal(res, expected) + + def test_duplicate_columns(self, pa): + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + self.check_error_on_write(df, pa, ValueError, "Duplicate column names found") + + def test_timedelta(self, pa): + df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)}) + check_round_trip(df, pa) + + def test_unsupported(self, pa): + # mixed python objects + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + # pyarrow 0.11 raises ArrowTypeError + # older pyarrows raise ArrowInvalid + self.check_external_error_on_write(df, pa, pyarrow.ArrowException) + + def test_unsupported_float16(self, pa): + # #44847, #44914 + # Not able to write float 16 column using pyarrow. + data = np.arange(2, 10, dtype=np.float16) + df = pd.DataFrame(data=data, columns=["fp16"]) + if pa_version_under15p0: + self.check_external_error_on_write(df, pa, pyarrow.ArrowException) + else: + check_round_trip(df, pa) + + @pytest.mark.xfail( + is_platform_windows(), + reason=( + "PyArrow does not cleanup of partial files dumps when unsupported " + "dtypes are passed to_parquet function in windows" + ), + ) + @pytest.mark.skipif(not pa_version_under15p0, reason="float16 works on 15") + @pytest.mark.parametrize("path_type", [str, pathlib.Path]) + def test_unsupported_float16_cleanup(self, pa, path_type): + # #44847, #44914 + # Not able to write float 16 column using pyarrow. 
+ # Tests cleanup by pyarrow in case of an error + data = np.arange(2, 10, dtype=np.float16) + df = pd.DataFrame(data=data, columns=["fp16"]) + + with tm.ensure_clean() as path_str: + path = path_type(path_str) + with tm.external_error_raised(pyarrow.ArrowException): + df.to_parquet(path=path, engine=pa) + assert not os.path.isfile(path) + + def test_categorical(self, pa): + # supported in >= 0.7.0 + df = pd.DataFrame() + df["a"] = pd.Categorical(list("abcdef")) + + # test for null, out-of-order values, and unobserved category + df["b"] = pd.Categorical( + ["bar", "foo", "foo", "bar", None, "bar"], + dtype=pd.CategoricalDtype(["foo", "bar", "baz"]), + ) + + # test for ordered flag + df["c"] = pd.Categorical( + ["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True + ) + + check_round_trip(df, pa) + + @pytest.mark.single_cpu + def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so): + s3fs = pytest.importorskip("s3fs") + s3 = s3fs.S3FileSystem(**s3so) + kw = {"filesystem": s3} + check_round_trip( + df_compat, + pa, + path=f"{s3_public_bucket.name}/pyarrow.parquet", + read_kwargs=kw, + write_kwargs=kw, + ) + + @pytest.mark.single_cpu + def test_s3_roundtrip(self, df_compat, s3_public_bucket, pa, s3so): + # GH #19134 + s3so = {"storage_options": s3so} + check_round_trip( + df_compat, + pa, + path=f"s3://{s3_public_bucket.name}/pyarrow.parquet", + read_kwargs=s3so, + write_kwargs=s3so, + ) + + @pytest.mark.single_cpu + @pytest.mark.parametrize( + "partition_col", + [ + ["A"], + [], + ], + ) + def test_s3_roundtrip_for_dir( + self, df_compat, s3_public_bucket, pa, partition_col, s3so + ): + pytest.importorskip("s3fs") + # GH #26388 + expected_df = df_compat.copy() + + # GH #35791 + if partition_col: + expected_df = expected_df.astype(dict.fromkeys(partition_col, np.int32)) + partition_col_type = "category" + + expected_df[partition_col] = expected_df[partition_col].astype( + partition_col_type + ) + + check_round_trip( + df_compat, + pa, + expected=expected_df, + path=f"s3://{s3_public_bucket.name}/parquet_dir", + read_kwargs={"storage_options": s3so}, + write_kwargs={ + "partition_cols": partition_col, + "compression": None, + "storage_options": s3so, + }, + check_like=True, + repeat=1, + ) + + def test_read_file_like_obj_support(self, df_compat): + pytest.importorskip("pyarrow") + buffer = BytesIO() + df_compat.to_parquet(buffer) + df_from_buf = read_parquet(buffer) + tm.assert_frame_equal(df_compat, df_from_buf) + + def test_expand_user(self, df_compat, monkeypatch): + pytest.importorskip("pyarrow") + monkeypatch.setenv("HOME", "TestingUser") + monkeypatch.setenv("USERPROFILE", "TestingUser") + with pytest.raises(OSError, match=r".*TestingUser.*"): + read_parquet("~/file.parquet") + with pytest.raises(OSError, match=r".*TestingUser.*"): + df_compat.to_parquet("~/file.parquet") + + def test_partition_cols_supported(self, tmp_path, pa, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None) + check_partition_names(tmp_path, partition_cols) + assert read_parquet(tmp_path).shape == df.shape + + def test_partition_cols_string(self, tmp_path, pa, df_full): + # GH #27117 + partition_cols = "bool" + partition_cols_list = [partition_cols] + df = df_full + df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None) + check_partition_names(tmp_path, partition_cols_list) + assert read_parquet(tmp_path).shape == df.shape + + @pytest.mark.parametrize( + 
"path_type", [str, lambda x: x], ids=["string", "pathlib.Path"] + ) + def test_partition_cols_pathlib(self, tmp_path, pa, df_compat, path_type): + # GH 35902 + + partition_cols = "B" + partition_cols_list = [partition_cols] + df = df_compat + + path = path_type(tmp_path) + df.to_parquet(path, partition_cols=partition_cols_list) + assert read_parquet(path).shape == df.shape + + def test_empty_dataframe(self, pa): + # GH #27339 + df = pd.DataFrame(index=[], columns=[]) + check_round_trip(df, pa) + + def test_write_with_schema(self, pa): + import pyarrow + + df = pd.DataFrame({"x": [0, 1]}) + schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())]) + out_df = df.astype(bool) + check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df) + + def test_additional_extension_arrays(self, pa): + # test additional ExtensionArrays that are supported through the + # __arrow_array__ protocol + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="UInt32"), + "c": pd.Series(["a", None, "c"], dtype="string"), + } + ) + check_round_trip(df, pa) + + df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")}) + check_round_trip(df, pa) + + def test_pyarrow_backed_string_array(self, pa, string_storage): + # test ArrowStringArray supported through the __arrow_array__ protocol + pytest.importorskip("pyarrow") + df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")}) + with pd.option_context("string_storage", string_storage): + check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]")) + + def test_additional_extension_types(self, pa): + # test additional ExtensionArrays that are supported through the + # __arrow_array__ protocol + by defining a custom ExtensionType + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]), + "d": pd.period_range("2012-01-01", periods=3, freq="D"), + # GH-45881 issue with interval with datetime64[ns] subtype + "e": pd.IntervalIndex.from_breaks( + pd.date_range("2012-01-01", periods=4, freq="D") + ), + } + ) + check_round_trip(df, pa) + + def test_timestamp_nanoseconds(self, pa): + # with version 2.6, pyarrow defaults to writing the nanoseconds, so + # this should work without error + # Note in previous pyarrows(<7.0.0), only the pseudo-version 2.0 was available + ver = "2.6" + df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1ns", periods=10)}) + check_round_trip(df, pa, write_kwargs={"version": ver}) + + def test_timezone_aware_index(self, request, pa, timezone_aware_date_list): + if timezone_aware_date_list.tzinfo != datetime.timezone.utc: + request.applymarker( + pytest.mark.xfail( + reason="temporary skip this test until it is properly resolved: " + "https://github.com/pandas-dev/pandas/issues/37286" + ) + ) + idx = 5 * [timezone_aware_date_list] + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + # see gh-36004 + # compare time(zone) values only, skip their class: + # pyarrow always creates fixed offset timezones using pytz.FixedOffset() + # even if it was datetime.timezone() originally + # + # technically they are the same: + # they both implement datetime.tzinfo + # they both wrap datetime.timedelta() + # this use-case sets the resolution to 1 minute + check_round_trip(df, pa, check_dtype=False) + + def test_filter_row_groups(self, pa): + # https://github.com/pandas-dev/pandas/issues/26551 + pytest.importorskip("pyarrow") + df = 
pd.DataFrame({"a": list(range(3))}) + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa) + result = read_parquet(path, pa, filters=[("a", "==", 0)]) + assert len(result) == 1 + + def test_read_parquet_manager(self, pa, using_array_manager): + # ensure that read_parquet honors the pandas.options.mode.data_manager option + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"] + ) + + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa) + result = read_parquet(path, pa) + if using_array_manager: + assert isinstance(result._mgr, pd.core.internals.ArrayManager) + else: + assert isinstance(result._mgr, pd.core.internals.BlockManager) + + def test_read_dtype_backend_pyarrow_config(self, pa, df_full): + import pyarrow + + df = df_full + + # additional supported types for pyarrow + dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["bool_with_none"] = [True, None, True] + + pa_table = pyarrow.Table.from_pandas(df) + expected = pa_table.to_pandas(types_mapper=pd.ArrowDtype) + if pa_version_under13p0: + # pyarrow infers datetimes as us instead of ns + expected["datetime"] = expected["datetime"].astype("timestamp[us][pyarrow]") + expected["datetime_with_nat"] = expected["datetime_with_nat"].astype( + "timestamp[us][pyarrow]" + ) + expected["datetime_tz"] = expected["datetime_tz"].astype( + pd.ArrowDtype(pyarrow.timestamp(unit="us", tz="Europe/Brussels")) + ) + + check_round_trip( + df, + engine=pa, + read_kwargs={"dtype_backend": "pyarrow"}, + expected=expected, + ) + + def test_read_dtype_backend_pyarrow_config_index(self, pa): + df = pd.DataFrame( + {"a": [1, 2]}, index=pd.Index([3, 4], name="test"), dtype="int64[pyarrow]" + ) + expected = df.copy() + import pyarrow + + if Version(pyarrow.__version__) > Version("11.0.0"): + expected.index = expected.index.astype("int64[pyarrow]") + check_round_trip( + df, + engine=pa, + read_kwargs={"dtype_backend": "pyarrow"}, + expected=expected, + ) + + def test_columns_dtypes_not_invalid(self, pa): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + # numeric + df.columns = [0, 1] + check_round_trip(df, pa) + + # bytes + df.columns = [b"foo", b"bar"] + with pytest.raises(NotImplementedError, match="|S3"): + # Bytes fails on read_parquet + check_round_trip(df, pa) + + # python object + df.columns = [ + datetime.datetime(2011, 1, 1, 0, 0), + datetime.datetime(2011, 1, 1, 1, 1), + ] + check_round_trip(df, pa) + + def test_empty_columns(self, pa): + # GH 52034 + df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + check_round_trip(df, pa) + + def test_df_attrs_persistence(self, tmp_path, pa): + path = tmp_path / "test_df_metadata.p" + df = pd.DataFrame(data={1: [1]}) + df.attrs = {"test_attribute": 1} + df.to_parquet(path, engine=pa) + new_df = read_parquet(path, engine=pa) + assert new_df.attrs == df.attrs + + def test_string_inference(self, tmp_path, pa): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}, index=["a", "b"]) + df.to_parquet(path, engine="pyarrow") + with pd.option_context("future.infer_string", True): + result = read_parquet(path, engine="pyarrow") + expected = pd.DataFrame( + data={"a": ["x", "y"]}, + dtype="string[pyarrow_numpy]", + index=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.skipif(pa_version_under11p0, 
reason="not supported before 11.0") + def test_roundtrip_decimal(self, tmp_path, pa): + # GH#54768 + import pyarrow as pa + + path = tmp_path / "decimal.p" + df = pd.DataFrame({"a": [Decimal("123.00")]}, dtype="string[pyarrow]") + df.to_parquet(path, schema=pa.schema([("a", pa.decimal128(5))])) + result = read_parquet(path) + expected = pd.DataFrame({"a": ["123"]}, dtype="string[python]") + tm.assert_frame_equal(result, expected) + + def test_infer_string_large_string_type(self, tmp_path, pa): + # GH#54798 + import pyarrow as pa + import pyarrow.parquet as pq + + path = tmp_path / "large_string.p" + + table = pa.table({"a": pa.array([None, "b", "c"], pa.large_string())}) + pq.write_table(table, path) + + with pd.option_context("future.infer_string", True): + result = read_parquet(path) + expected = pd.DataFrame( + data={"a": [None, "b", "c"]}, + dtype="string[pyarrow_numpy]", + columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + # NOTE: this test is not run by default, because it requires a lot of memory (>5GB) + # @pytest.mark.slow + # def test_string_column_above_2GB(self, tmp_path, pa): + # # https://github.com/pandas-dev/pandas/issues/55606 + # # above 2GB of string data + # v1 = b"x" * 100000000 + # v2 = b"x" * 147483646 + # df = pd.DataFrame({"strings": [v1] * 20 + [v2] + ["x"] * 20}, dtype="string") + # df.to_parquet(tmp_path / "test.parquet") + # result = read_parquet(tmp_path / "test.parquet") + # assert result["strings"].dtype == "string" + + +class TestParquetFastParquet(Base): + def test_basic(self, fp, df_full): + df = df_full + + dti = pd.date_range("20130101", periods=3, tz="US/Eastern") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["timedelta"] = pd.timedelta_range("1 day", periods=3) + check_round_trip(df, fp) + + def test_columns_dtypes_invalid(self, fp): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + err = TypeError + msg = "Column name must be a string" + + # numeric + df.columns = [0, 1] + self.check_error_on_write(df, fp, err, msg) + + # bytes + df.columns = [b"foo", b"bar"] + self.check_error_on_write(df, fp, err, msg) + + # python object + df.columns = [ + datetime.datetime(2011, 1, 1, 0, 0), + datetime.datetime(2011, 1, 1, 1, 1), + ] + self.check_error_on_write(df, fp, err, msg) + + def test_duplicate_columns(self, fp): + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + msg = "Cannot create parquet dataset with duplicate column names" + self.check_error_on_write(df, fp, ValueError, msg) + + def test_bool_with_none(self, fp): + df = pd.DataFrame({"a": [True, None, False]}) + expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16") + # Fastparquet bug in 0.7.1 makes it so that this dtype becomes + # float64 + check_round_trip(df, fp, expected=expected, check_dtype=False) + + def test_unsupported(self, fp): + # period + df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)}) + # error from fastparquet -> don't check exact error message + self.check_error_on_write(df, fp, ValueError, None) + + # mixed + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + msg = "Can't infer object conversion type" + self.check_error_on_write(df, fp, ValueError, msg) + + def test_categorical(self, fp): + df = pd.DataFrame({"a": pd.Categorical(list("abc"))}) + check_round_trip(df, fp) + + def test_filter_row_groups(self, fp): + d = {"a": list(range(3))} + df = pd.DataFrame(d) 
+ with tm.ensure_clean() as path: + df.to_parquet(path, engine=fp, compression=None, row_group_offsets=1) + result = read_parquet(path, fp, filters=[("a", "==", 0)]) + assert len(result) == 1 + + @pytest.mark.single_cpu + def test_s3_roundtrip(self, df_compat, s3_public_bucket, fp, s3so): + # GH #19134 + check_round_trip( + df_compat, + fp, + path=f"s3://{s3_public_bucket.name}/fastparquet.parquet", + read_kwargs={"storage_options": s3so}, + write_kwargs={"compression": None, "storage_options": s3so}, + ) + + def test_partition_cols_supported(self, tmp_path, fp, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 2 + + def test_partition_cols_string(self, tmp_path, fp, df_full): + # GH #27117 + partition_cols = "bool" + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 1 + + def test_partition_on_supported(self, tmp_path, fp, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + compression=None, + partition_on=partition_cols, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 2 + + def test_error_on_using_partition_cols_and_partition_on( + self, tmp_path, fp, df_full + ): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + msg = ( + "Cannot use both partition_on and partition_cols. 
Use partition_cols for " + "partitioning data" + ) + with pytest.raises(ValueError, match=msg): + df.to_parquet( + tmp_path, + engine="fastparquet", + compression=None, + partition_on=partition_cols, + partition_cols=partition_cols, + ) + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_empty_dataframe(self, fp): + # GH #27339 + df = pd.DataFrame() + expected = df.copy() + check_round_trip(df, fp, expected=expected) + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_timezone_aware_index(self, fp, timezone_aware_date_list): + idx = 5 * [timezone_aware_date_list] + + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + expected = df.copy() + expected.index.name = "index" + check_round_trip(df, fp, expected=expected) + + def test_use_nullable_dtypes_not_supported(self, fp): + df = pd.DataFrame({"a": [1, 2]}) + + with tm.ensure_clean() as path: + df.to_parquet(path) + with pytest.raises(ValueError, match="not supported for the fastparquet"): + with tm.assert_produces_warning(FutureWarning): + read_parquet(path, engine="fastparquet", use_nullable_dtypes=True) + with pytest.raises(ValueError, match="not supported for the fastparquet"): + read_parquet(path, engine="fastparquet", dtype_backend="pyarrow") + + def test_close_file_handle_on_read_error(self): + with tm.ensure_clean("test.parquet") as path: + pathlib.Path(path).write_bytes(b"breakit") + with pytest.raises(Exception, match=""): # Not important which exception + read_parquet(path, engine="fastparquet") + # The next line raises an error on Windows if the file is still open + pathlib.Path(path).unlink(missing_ok=False) + + def test_bytes_file_name(self, engine): + # GH#48944 + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean("test.parquet") as path: + with open(path.encode(), "wb") as f: + df.to_parquet(f) + + result = read_parquet(path, engine=engine) + tm.assert_frame_equal(result, df) + + def test_filesystem_notimplemented(self): + pytest.importorskip("fastparquet") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + NotImplementedError, match="filesystem is not implemented" + ): + df.to_parquet(path, engine="fastparquet", filesystem="foo") + + with tm.ensure_clean() as path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + NotImplementedError, match="filesystem is not implemented" + ): + read_parquet(path, engine="fastparquet", filesystem="foo") + + def test_invalid_filesystem(self): + pytest.importorskip("pyarrow") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + ValueError, match="filesystem must be a pyarrow or fsspec FileSystem" + ): + df.to_parquet(path, engine="pyarrow", filesystem="foo") + + with tm.ensure_clean() as path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + ValueError, match="filesystem must be a pyarrow or fsspec FileSystem" + ): + read_parquet(path, engine="pyarrow", filesystem="foo") + + def test_unsupported_pa_filesystem_storage_options(self): + pa_fs = pytest.importorskip("pyarrow.fs") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + NotImplementedError, + match="storage_options not supported with a pyarrow FileSystem.", + ): + df.to_parquet( + path, + engine="pyarrow", + filesystem=pa_fs.LocalFileSystem(), + storage_options={"foo": "bar"}, + ) + + with tm.ensure_clean() as 
path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + NotImplementedError, + match="storage_options not supported with a pyarrow FileSystem.", + ): + read_parquet( + path, + engine="pyarrow", + filesystem=pa_fs.LocalFileSystem(), + storage_options={"foo": "bar"}, + ) + + def test_invalid_dtype_backend(self, engine): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.parquet") as path: + df.to_parquet(path) + with pytest.raises(ValueError, match=msg): + read_parquet(path, dtype_backend="numpy") + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_empty_columns(self, fp): + # GH 52034 + df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + expected = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + check_round_trip(df, fp, expected=expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..4f3993a038197e52c7f21fb4f4d40425e897600f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py @@ -0,0 +1,652 @@ +""" +manage legacy pickle tests + +How to add pickle tests: + +1. Install pandas version intended to output the pickle. + +2. Execute "generate_legacy_storage_files.py" to create the pickle. +$ python generate_legacy_storage_files.py pickle + +3. Move the created pickle to "data/legacy_pickle/" directory. +""" +from __future__ import annotations + +from array import array +import bz2 +import datetime +import functools +from functools import partial +import gzip +import io +import os +from pathlib import Path +import pickle +import shutil +import tarfile +from typing import Any +import uuid +import zipfile + +import numpy as np +import pytest + +from pandas.compat import ( + get_lzma_file, + is_platform_little_endian, +) +from pandas.compat._optional import import_optional_dependency +from pandas.compat.compressors import flatten_buffer +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + period_range, +) +import pandas._testing as tm +from pandas.tests.io.generate_legacy_storage_files import create_pickle_data + +import pandas.io.common as icom +from pandas.tseries.offsets import ( + Day, + MonthEnd, +) + + +# --------------------- +# comparison functions +# --------------------- +def compare_element(result, expected, typ): + if isinstance(expected, Index): + tm.assert_index_equal(expected, result) + return + + if typ.startswith("sp_"): + tm.assert_equal(result, expected) + elif typ == "timestamp": + if expected is pd.NaT: + assert result is pd.NaT + else: + assert result == expected + else: + comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal) + comparator(result, expected) + + +# --------------------- +# tests +# --------------------- + + +@pytest.mark.parametrize( + "data", + [ + b"123", + b"123456", + bytearray(b"123"), + memoryview(b"123"), + pickle.PickleBuffer(b"123"), + array("I", [1, 2, 3]), + memoryview(b"123456").cast("B", (3, 2)), + memoryview(b"123456").cast("B", (3, 2))[::2], + np.arange(12).reshape((3, 4), order="C"), + np.arange(12).reshape((3, 4), order="F"), + np.arange(12).reshape((3, 4), order="C")[:, ::2], + ], +) +def test_flatten_buffer(data): + result = 
flatten_buffer(data) + expected = memoryview(data).tobytes("A") + assert result == expected + if isinstance(data, (bytes, bytearray)): + assert result is data + elif isinstance(result, memoryview): + assert result.ndim == 1 + assert result.format == "B" + assert result.contiguous + assert result.shape == (result.nbytes,) + + +def test_pickles(datapath): + if not is_platform_little_endian(): + pytest.skip("known failure on non-little endian") + + # For loop for compat with --strict-data-files + for legacy_pickle in Path(__file__).parent.glob("data/legacy_pickle/*/*.p*kl*"): + legacy_pickle = datapath(legacy_pickle) + + data = pd.read_pickle(legacy_pickle) + + for typ, dv in data.items(): + for dt, result in dv.items(): + expected = data[typ][dt] + + if typ == "series" and dt == "ts": + # GH 7748 + tm.assert_series_equal(result, expected) + assert result.index.freq == expected.index.freq + assert not result.index.freq.normalize + tm.assert_series_equal(result > 0, expected > 0) + + # GH 9291 + freq = result.index.freq + assert freq + Day(1) == Day(2) + + res = freq + pd.Timedelta(hours=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, hours=1) + + res = freq + pd.Timedelta(nanoseconds=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, nanoseconds=1) + elif typ == "index" and dt == "period": + tm.assert_index_equal(result, expected) + assert isinstance(result.freq, MonthEnd) + assert result.freq == MonthEnd() + assert result.freqstr == "M" + tm.assert_index_equal(result.shift(2), expected.shift(2)) + elif typ == "series" and dt in ("dt_tz", "cat"): + tm.assert_series_equal(result, expected) + elif typ == "frame" and dt in ( + "dt_mixed_tzs", + "cat_onecol", + "cat_and_float", + ): + tm.assert_frame_equal(result, expected) + else: + compare_element(result, expected, typ) + + +def python_pickler(obj, path): + with open(path, "wb") as fh: + pickle.dump(obj, fh, protocol=-1) + + +def python_unpickler(path): + with open(path, "rb") as fh: + fh.seek(0) + return pickle.load(fh) + + +def flatten(data: dict) -> list[tuple[str, Any]]: + """Flatten create_pickle_data""" + return [ + (typ, example) + for typ, examples in data.items() + for example in examples.values() + ] + + +@pytest.mark.parametrize( + "pickle_writer", + [ + pytest.param(python_pickler, id="python"), + pytest.param(pd.to_pickle, id="pandas_proto_default"), + pytest.param( + functools.partial(pd.to_pickle, protocol=pickle.HIGHEST_PROTOCOL), + id="pandas_proto_highest", + ), + pytest.param(functools.partial(pd.to_pickle, protocol=4), id="pandas_proto_4"), + pytest.param( + functools.partial(pd.to_pickle, protocol=5), + id="pandas_proto_5", + ), + ], +) +@pytest.mark.parametrize("writer", [pd.to_pickle, python_pickler]) +@pytest.mark.parametrize("typ, expected", flatten(create_pickle_data())) +def test_round_trip_current(typ, expected, pickle_writer, writer): + with tm.ensure_clean() as path: + # test writing with each pickler + pickle_writer(expected, path) + + # test reading with each unpickler + result = pd.read_pickle(path) + compare_element(result, expected, typ) + + result = python_unpickler(path) + compare_element(result, expected, typ) + + # and the same for file objects (GH 35679) + with open(path, mode="wb") as handle: + writer(expected, path) + handle.seek(0) # shouldn't close file handle + with open(path, mode="rb") as handle: + result = pd.read_pickle(handle) + handle.seek(0) # shouldn't close file handle + compare_element(result, expected, typ) + + +def 
test_pickle_path_pathlib(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle) + tm.assert_frame_equal(df, result) + + +def test_pickle_path_localpath(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_localpath(df.to_pickle, pd.read_pickle) + tm.assert_frame_equal(df, result) + + +# --------------------- +# test pickle compression +# --------------------- + + +@pytest.fixture +def get_random_path(): + return f"__{uuid.uuid4()}__.pickle" + + +class TestCompression: + _extension_to_compression = icom.extension_to_compression + + def compress_file(self, src_path, dest_path, compression): + if compression is None: + shutil.copyfile(src_path, dest_path) + return + + if compression == "gzip": + f = gzip.open(dest_path, "w") + elif compression == "bz2": + f = bz2.BZ2File(dest_path, "w") + elif compression == "zip": + with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: + f.write(src_path, os.path.basename(src_path)) + elif compression == "tar": + with open(src_path, "rb") as fh: + with tarfile.open(dest_path, mode="w") as tar: + tarinfo = tar.gettarinfo(src_path, os.path.basename(src_path)) + tar.addfile(tarinfo, fh) + elif compression == "xz": + f = get_lzma_file()(dest_path, "w") + elif compression == "zstd": + f = import_optional_dependency("zstandard").open(dest_path, "wb") + else: + msg = f"Unrecognized compression type: {compression}" + raise ValueError(msg) + + if compression not in ["zip", "tar"]: + with open(src_path, "rb") as fh: + with f: + f.write(fh.read()) + + def test_write_explicit(self, compression, get_random_path): + base = get_random_path + path1 = base + ".compressed" + path2 = base + ".raw" + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to compressed file + df.to_pickle(p1, compression=compression) + + # decompress + with tm.decompress_file(p1, compression=compression) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) + + # read decompressed file + df2 = pd.read_pickle(p2, compression=None) + + tm.assert_frame_equal(df, df2) + + @pytest.mark.parametrize("compression", ["", "None", "bad", "7z"]) + def test_write_explicit_bad(self, compression, get_random_path): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean(get_random_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(path, compression=compression) + + def test_write_infer(self, compression_ext, get_random_path): + base = get_random_path + path1 = base + compression_ext + path2 = base + ".raw" + compression = self._extension_to_compression.get(compression_ext.lower()) + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to compressed file by inferred compression method + df.to_pickle(p1) + + # 
decompress + with tm.decompress_file(p1, compression=compression) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) + + # read decompressed file + df2 = pd.read_pickle(p2, compression=None) + + tm.assert_frame_equal(df, df2) + + def test_read_explicit(self, compression, get_random_path): + base = get_random_path + path1 = base + ".raw" + path2 = base + ".compressed" + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to uncompressed file + df.to_pickle(p1, compression=None) + + # compress + self.compress_file(p1, p2, compression=compression) + + # read compressed file + df2 = pd.read_pickle(p2, compression=compression) + tm.assert_frame_equal(df, df2) + + def test_read_infer(self, compression_ext, get_random_path): + base = get_random_path + path1 = base + ".raw" + path2 = base + compression_ext + compression = self._extension_to_compression.get(compression_ext.lower()) + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to uncompressed file + df.to_pickle(p1, compression=None) + + # compress + self.compress_file(p1, p2, compression=compression) + + # read compressed file by inferred compression method + df2 = pd.read_pickle(p2) + tm.assert_frame_equal(df, df2) + + +# --------------------- +# test pickle compression +# --------------------- + + +class TestProtocol: + @pytest.mark.parametrize("protocol", [-1, 0, 1, 2]) + def test_read(self, protocol, get_random_path): + with tm.ensure_clean(get_random_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(path, protocol=protocol) + df2 = pd.read_pickle(path) + tm.assert_frame_equal(df, df2) + + +@pytest.mark.parametrize( + ["pickle_file", "excols"], + [ + ("test_py27.pkl", Index(["a", "b", "c"])), + ( + "test_mi_py27.pkl", + pd.MultiIndex.from_arrays([["a", "b", "c"], ["A", "B", "C"]]), + ), + ], +) +def test_unicode_decode_error(datapath, pickle_file, excols): + # pickle file written with py27, should be readable without raising + # UnicodeDecodeError, see GH#28645 and GH#31988 + path = datapath("io", "data", "pickle", pickle_file) + df = pd.read_pickle(path) + + # just test the columns are correct since the values are random + tm.assert_index_equal(df.columns, excols) + + +# --------------------- +# tests for buffer I/O +# --------------------- + + +def test_pickle_buffer_roundtrip(): + with tm.ensure_clean() as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with open(path, "wb") as fh: + df.to_pickle(fh) + with open(path, "rb") as fh: + result = pd.read_pickle(fh) + tm.assert_frame_equal(df, result) + + +# --------------------- +# tests for URL I/O +# --------------------- + + +@pytest.mark.parametrize( + "mockurl", ["http://url.com", "ftp://test.com", "http://gzip.com"] +) +def test_pickle_generalurl_read(monkeypatch, mockurl): + def python_pickler(obj, path): + with open(path, "wb") as fh: + pickle.dump(obj, fh, protocol=-1) + + class MockReadResponse: + def __init__(self, 
path) -> None: + self.file = open(path, "rb") + if "gzip" in path: + self.headers = {"Content-Encoding": "gzip"} + else: + self.headers = {"Content-Encoding": ""} + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def read(self): + return self.file.read() + + def close(self): + return self.file.close() + + with tm.ensure_clean() as path: + + def mock_urlopen_read(*args, **kwargs): + return MockReadResponse(path) + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + python_pickler(df, path) + monkeypatch.setattr("urllib.request.urlopen", mock_urlopen_read) + result = pd.read_pickle(mockurl) + tm.assert_frame_equal(df, result) + + +def test_pickle_fsspec_roundtrip(): + pytest.importorskip("fsspec") + with tm.ensure_clean(): + mockurl = "memory://mockfile" + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(mockurl) + result = pd.read_pickle(mockurl) + tm.assert_frame_equal(df, result) + + +class MyTz(datetime.tzinfo): + def __init__(self) -> None: + pass + + +def test_read_pickle_with_subclass(): + # GH 12163 + expected = Series(dtype=object), MyTz() + result = tm.round_trip_pickle(expected) + + tm.assert_series_equal(result[0], expected[0]) + assert isinstance(result[1], MyTz) + + +def test_pickle_binary_object_compression(compression): + """ + Read/write from binary file-objects w/wo compression. + + GH 26237, GH 29054, and GH 29570 + """ + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # reference for compression + with tm.ensure_clean() as path: + df.to_pickle(path, compression=compression) + reference = Path(path).read_bytes() + + # write + buffer = io.BytesIO() + df.to_pickle(buffer, compression=compression) + buffer.seek(0) + + # gzip and zip safe the filename: cannot compare the compressed content + assert buffer.getvalue() == reference or compression in ("gzip", "zip", "tar") + + # read + read_df = pd.read_pickle(buffer, compression=compression) + buffer.seek(0) + tm.assert_frame_equal(df, read_df) + + +def test_pickle_dataframe_with_multilevel_index( + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, +): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + def _test_roundtrip(frame): + unpickled = tm.round_trip_pickle(frame) + tm.assert_frame_equal(frame, unpickled) + + _test_roundtrip(frame) + _test_roundtrip(frame.T) + _test_roundtrip(ymd) + _test_roundtrip(ymd.T) + + +def test_pickle_timeseries_periodindex(): + # GH#2891 + prng = period_range("1/1/2011", "1/1/2012", freq="M") + ts = Series(np.random.default_rng(2).standard_normal(len(prng)), prng) + new_ts = tm.round_trip_pickle(ts) + assert new_ts.index.freqstr == "M" + + +@pytest.mark.parametrize( + "name", [777, 777.0, "name", datetime.datetime(2001, 11, 11), (1, 2)] +) +def test_pickle_preserve_name(name): + unpickled = tm.round_trip_pickle(Series(np.arange(10, dtype=np.float64), name=name)) + assert unpickled.name == name + + +def test_pickle_datetimes(datetime_series): + unp_ts = tm.round_trip_pickle(datetime_series) + tm.assert_series_equal(unp_ts, datetime_series) + + +def test_pickle_strings(string_series): + unp_series = 
tm.round_trip_pickle(string_series) + tm.assert_series_equal(unp_series, string_series) + + +@td.skip_array_manager_invalid_test +def test_pickle_preserves_block_ndim(): + # GH#37631 + ser = Series(list("abc")).astype("category").iloc[[0]] + res = tm.round_trip_pickle(ser) + + assert res._mgr.blocks[0].ndim == 1 + assert res._mgr.blocks[0].shape == (1,) + + # GH#37631 OP issue was about indexing, underlying problem was pickle + tm.assert_series_equal(res[[True]], ser) + + +@pytest.mark.parametrize("protocol", [pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL]) +def test_pickle_big_dataframe_compression(protocol, compression): + # GH#39002 + df = DataFrame(range(100000)) + result = tm.round_trip_pathlib( + partial(df.to_pickle, protocol=protocol, compression=compression), + partial(pd.read_pickle, compression=compression), + ) + tm.assert_frame_equal(df, result) + + +def test_pickle_frame_v124_unpickle_130(datapath): + # GH#42345 DataFrame created in 1.2.x, unpickle in 1.3.x + path = datapath( + Path(__file__).parent, + "data", + "legacy_pickle", + "1.2.4", + "empty_frame_v1_2_4-GH#42345.pkl", + ) + with open(path, "rb") as fd: + df = pickle.load(fd) + + expected = DataFrame(index=[], columns=[]) + tm.assert_frame_equal(df, expected) + + +def test_pickle_pos_args_deprecation(): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_pickle except for the " + r"argument 'path' will be keyword-only." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buffer = io.BytesIO() + df.to_pickle(buffer, "infer") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_s3.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_s3.py new file mode 100644 index 0000000000000000000000000000000000000000..79473895b662da6af68fbe29a60eb05f134a54df --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_s3.py @@ -0,0 +1,43 @@ +from io import BytesIO + +import pytest + +from pandas import read_csv + + +def test_streaming_s3_objects(): + # GH17135 + # botocore gained iteration support in 1.10.47, can now be used in read_* + pytest.importorskip("botocore", minversion="1.10.47") + from botocore.response import StreamingBody + + data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"] + for el in data: + body = StreamingBody(BytesIO(el), content_length=len(el)) + read_csv(body) + + +@pytest.mark.single_cpu +def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so): + # GH 34626 + pytest.importorskip("s3fs") + result = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", + nrows=3, + storage_options=s3so, + ) + assert len(result) == 3 + + +@pytest.mark.single_cpu +def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so): + # Ensure we can read from a public bucket with credentials + # GH 34626 + pytest.importorskip("s3fs") + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", + nrows=5, + header=None, + storage_options=s3so, + ) + assert len(df) == 5 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_spss.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_spss.py new file mode 100644 index 0000000000000000000000000000000000000000..e118c90d9bc02041719cd1452b5af8e77b12db77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_spss.py @@ -0,0 +1,164 @@ +import datetime +from pathlib import Path + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from 
pandas.util.version import Version + +pyreadstat = pytest.importorskip("pyreadstat") + + +# TODO(CoW) - detection of chained assignment in cython +# https://github.com/pandas-dev/pandas/issues/51315 +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +@pytest.mark.parametrize("path_klass", [lambda p: p, Path]) +def test_spss_labelled_num(path_klass, datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav")) + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0]) + expected["VAR00002"] = pd.Categorical(expected["VAR00002"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"VAR00002": 1.0}, index=[0]) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_labelled_num_na(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "labelled-num-na.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"VAR00002": ["This is one", None]}) + expected["VAR00002"] = pd.Categorical(expected["VAR00002"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"VAR00002": [1.0, np.nan]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_labelled_str(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "labelled-str.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"gender": ["Male", "Female"]}) + expected["gender"] = pd.Categorical(expected["gender"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"gender": ["M", "F"]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_umlauts(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "umlauts.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame( + {"var1": ["the ä umlaut", "the ü umlaut", "the ä umlaut", "the ö umlaut"]} + ) + expected["var1"] = pd.Categorical(expected["var1"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}) + tm.assert_frame_equal(df, expected) + + +def test_spss_usecols(datapath): + # usecols must be list-like + fname = datapath("io", "data", "spss", "labelled-num.sav") + + with pytest.raises(TypeError, match="usecols must be list-like."): + pd.read_spss(fname, 
usecols="VAR00002") + + +def test_spss_umlauts_dtype_backend(datapath, dtype_backend): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "umlauts.sav") + + df = pd.read_spss(fname, convert_categoricals=False, dtype_backend=dtype_backend) + expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64") + + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + from pandas.arrays import ArrowExtensionArray + + expected = pd.DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(df, expected) + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + pd.read_spss("test", dtype_backend="numpy") + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_metadata(datapath): + # GH 54264 + fname = datapath("io", "data", "spss", "labelled-num.sav") + + df = pd.read_spss(fname) + metadata = { + "column_names": ["VAR00002"], + "column_labels": [None], + "column_names_to_labels": {"VAR00002": None}, + "file_encoding": "UTF-8", + "number_columns": 1, + "number_rows": 1, + "variable_value_labels": {"VAR00002": {1.0: "This is one"}}, + "value_labels": {"labels0": {1.0: "This is one"}}, + "variable_to_label": {"VAR00002": "labels0"}, + "notes": [], + "original_variable_types": {"VAR00002": "F8.0"}, + "readstat_variable_types": {"VAR00002": "double"}, + "table_name": None, + "missing_ranges": {}, + "missing_user_values": {}, + "variable_storage_width": {"VAR00002": 8}, + "variable_display_width": {"VAR00002": 8}, + "variable_alignment": {"VAR00002": "unknown"}, + "variable_measure": {"VAR00002": "unknown"}, + "file_label": None, + "file_format": "sav/zsav", + } + if Version(pyreadstat.__version__) >= Version("1.2.4"): + metadata.update( + { + "creation_time": datetime.datetime(2015, 2, 6, 14, 33, 36), + "modification_time": datetime.datetime(2015, 2, 6, 14, 33, 36), + } + ) + assert df.attrs == metadata diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_sql.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_sql.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1f965f26aa9f2e400471dfc14641cf65d0bdf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_sql.py @@ -0,0 +1,4388 @@ +from __future__ import annotations + +import contextlib +from contextlib import closing +import csv +from datetime import ( + date, + datetime, + time, + timedelta, +) +from io import StringIO +from pathlib import Path +import sqlite3 +from typing import TYPE_CHECKING +import uuid + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas.compat import ( + pa_version_under13p0, + pa_version_under14p1, +) +from pandas.compat._optional import import_optional_dependency +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + isna, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) +from pandas.util.version import Version + +from pandas.io import sql +from pandas.io.sql 
import ( + SQLAlchemyEngine, + SQLDatabase, + SQLiteDatabase, + get_engine, + pandasSQL_builder, + read_sql_query, + read_sql_table, +) + +if TYPE_CHECKING: + import sqlalchemy + + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def sql_strings(): + return { + "read_parameters": { + "sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?", + "mysql": "SELECT * FROM iris WHERE `Name`=%s AND `SepalLength`=%s", + "postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s', + }, + "read_named_parameters": { + "sqlite": """ + SELECT * FROM iris WHERE Name=:name AND SepalLength=:length + """, + "mysql": """ + SELECT * FROM iris WHERE + `Name`=%(name)s AND `SepalLength`=%(length)s + """, + "postgresql": """ + SELECT * FROM iris WHERE + "Name"=%(name)s AND "SepalLength"=%(length)s + """, + }, + "read_no_parameters_with_percent": { + "sqlite": "SELECT * FROM iris WHERE Name LIKE '%'", + "mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'", + "postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'", + }, + } + + +def iris_table_metadata(): + import sqlalchemy + from sqlalchemy import ( + Column, + Double, + Float, + MetaData, + String, + Table, + ) + + dtype = Double if Version(sqlalchemy.__version__) >= Version("2.0.0") else Float + metadata = MetaData() + iris = Table( + "iris", + metadata, + Column("SepalLength", dtype), + Column("SepalWidth", dtype), + Column("PetalLength", dtype), + Column("PetalWidth", dtype), + Column("Name", String(200)), + ) + return iris + + +def create_and_load_iris_sqlite3(conn, iris_file: Path): + stmt = """CREATE TABLE iris ( + "SepalLength" REAL, + "SepalWidth" REAL, + "PetalLength" REAL, + "PetalWidth" REAL, + "Name" TEXT + )""" + + cur = conn.cursor() + cur.execute(stmt) + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + next(reader) + stmt = "INSERT INTO iris VALUES(?, ?, ?, ?, ?)" + # ADBC requires explicit types - no implicit str -> float conversion + records = [] + records = [ + ( + float(row[0]), + float(row[1]), + float(row[2]), + float(row[3]), + row[4], + ) + for row in reader + ] + + cur.executemany(stmt, records) + cur.close() + + conn.commit() + + +def create_and_load_iris_postgresql(conn, iris_file: Path): + stmt = """CREATE TABLE iris ( + "SepalLength" DOUBLE PRECISION, + "SepalWidth" DOUBLE PRECISION, + "PetalLength" DOUBLE PRECISION, + "PetalWidth" DOUBLE PRECISION, + "Name" TEXT + )""" + with conn.cursor() as cur: + cur.execute(stmt) + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + next(reader) + stmt = "INSERT INTO iris VALUES($1, $2, $3, $4, $5)" + # ADBC requires explicit types - no implicit str -> float conversion + records = [ + ( + float(row[0]), + float(row[1]), + float(row[2]), + float(row[3]), + row[4], + ) + for row in reader + ] + + cur.executemany(stmt, records) + + conn.commit() + + +def create_and_load_iris(conn, iris_file: Path): + from sqlalchemy import insert + + iris = iris_table_metadata() + + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + header = next(reader) + params = [dict(zip(header, row)) for row in reader] + stmt = insert(iris).values(params) + with conn.begin() as con: + iris.drop(con, checkfirst=True) + iris.create(bind=con) + con.execute(stmt) + + +def create_and_load_iris_view(conn): + stmt = "CREATE VIEW iris_view AS SELECT * FROM iris" + if isinstance(conn, 
sqlite3.Connection): + cur = conn.cursor() + cur.execute(stmt) + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(stmt) + conn.commit() + else: + from sqlalchemy import text + + stmt = text(stmt) + with conn.begin() as con: + con.execute(stmt) + + +def types_table_metadata(dialect: str): + from sqlalchemy import ( + TEXT, + Boolean, + Column, + DateTime, + Float, + Integer, + MetaData, + Table, + ) + + date_type = TEXT if dialect == "sqlite" else DateTime + bool_type = Integer if dialect == "sqlite" else Boolean + metadata = MetaData() + types = Table( + "types", + metadata, + Column("TextCol", TEXT), + Column("DateCol", date_type), + Column("IntDateCol", Integer), + Column("IntDateOnlyCol", Integer), + Column("FloatCol", Float), + Column("IntCol", Integer), + Column("BoolCol", bool_type), + Column("IntColWithNull", Integer), + Column("BoolColWithNull", bool_type), + ) + return types + + +def create_and_load_types_sqlite3(conn, types_data: list[dict]): + stmt = """CREATE TABLE types ( + "TextCol" TEXT, + "DateCol" TEXT, + "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, + "FloatCol" REAL, + "IntCol" INTEGER, + "BoolCol" INTEGER, + "IntColWithNull" INTEGER, + "BoolColWithNull" INTEGER + )""" + + ins_stmt = """ + INSERT INTO types + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) + """ + + if isinstance(conn, sqlite3.Connection): + cur = conn.cursor() + cur.execute(stmt) + cur.executemany(ins_stmt, types_data) + else: + with conn.cursor() as cur: + cur.execute(stmt) + cur.executemany(ins_stmt, types_data) + + conn.commit() + + +def create_and_load_types_postgresql(conn, types_data: list[dict]): + with conn.cursor() as cur: + stmt = """CREATE TABLE types ( + "TextCol" TEXT, + "DateCol" TIMESTAMP, + "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, + "FloatCol" DOUBLE PRECISION, + "IntCol" INTEGER, + "BoolCol" BOOLEAN, + "IntColWithNull" INTEGER, + "BoolColWithNull" BOOLEAN + )""" + cur.execute(stmt) + + stmt = """ + INSERT INTO types + VALUES($1, $2::timestamp, $3, $4, $5, $6, $7, $8, $9) + """ + + cur.executemany(stmt, types_data) + + conn.commit() + + +def create_and_load_types(conn, types_data: list[dict], dialect: str): + from sqlalchemy import insert + from sqlalchemy.engine import Engine + + types = types_table_metadata(dialect) + + stmt = insert(types).values(types_data) + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + types.drop(conn, checkfirst=True) + types.create(bind=conn) + conn.execute(stmt) + else: + with conn.begin(): + types.drop(conn, checkfirst=True) + types.create(bind=conn) + conn.execute(stmt) + + +def create_and_load_postgres_datetz(conn): + from sqlalchemy import ( + Column, + DateTime, + MetaData, + Table, + insert, + ) + from sqlalchemy.engine import Engine + + metadata = MetaData() + datetz = Table("datetz", metadata, Column("DateColWithTz", DateTime(timezone=True))) + datetz_data = [ + { + "DateColWithTz": "2000-01-01 00:00:00-08:00", + }, + { + "DateColWithTz": "2000-06-01 00:00:00-07:00", + }, + ] + stmt = insert(datetz).values(datetz_data) + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + else: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + + # "2000-01-01 00:00:00-08:00" should convert to + # "2000-01-01 08:00:00" + # "2000-06-01 
00:00:00-07:00" should convert to + # "2000-06-01 07:00:00" + # GH 6415 + expected_data = [ + Timestamp("2000-01-01 08:00:00", tz="UTC"), + Timestamp("2000-06-01 07:00:00", tz="UTC"), + ] + return Series(expected_data, name="DateColWithTz") + + +def check_iris_frame(frame: DataFrame): + pytype = frame.dtypes.iloc[0].type + row = frame.iloc[0] + assert issubclass(pytype, np.floating) + tm.assert_series_equal( + row, Series([5.1, 3.5, 1.4, 0.2, "Iris-setosa"], index=frame.columns, name=0) + ) + assert frame.shape in ((150, 5), (8, 5)) + + +def count_rows(conn, table_name: str): + stmt = f"SELECT count(*) AS count_1 FROM {table_name}" + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if isinstance(conn, sqlite3.Connection): + cur = conn.cursor() + return cur.execute(stmt).fetchone()[0] + elif adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(stmt) + return cur.fetchone()[0] + else: + from sqlalchemy import create_engine + from sqlalchemy.engine import Engine + + if isinstance(conn, str): + try: + engine = create_engine(conn) + with engine.connect() as conn: + return conn.exec_driver_sql(stmt).scalar_one() + finally: + engine.dispose() + elif isinstance(conn, Engine): + with conn.connect() as conn: + return conn.exec_driver_sql(stmt).scalar_one() + else: + return conn.exec_driver_sql(stmt).scalar_one() + + +@pytest.fixture +def iris_path(datapath): + iris_path = datapath("io", "data", "csv", "iris.csv") + return Path(iris_path) + + +@pytest.fixture +def types_data(): + return [ + { + "TextCol": "first", + "DateCol": "2000-01-03 00:00:00", + "IntDateCol": 535852800, + "IntDateOnlyCol": 20101010, + "FloatCol": 10.10, + "IntCol": 1, + "BoolCol": False, + "IntColWithNull": 1, + "BoolColWithNull": False, + }, + { + "TextCol": "first", + "DateCol": "2000-01-04 00:00:00", + "IntDateCol": 1356998400, + "IntDateOnlyCol": 20101212, + "FloatCol": 10.10, + "IntCol": 1, + "BoolCol": False, + "IntColWithNull": None, + "BoolColWithNull": None, + }, + ] + + +@pytest.fixture +def types_data_frame(types_data): + dtypes = { + "TextCol": "str", + "DateCol": "str", + "IntDateCol": "int64", + "IntDateOnlyCol": "int64", + "FloatCol": "float", + "IntCol": "int64", + "BoolCol": "int64", + "IntColWithNull": "float", + "BoolColWithNull": "float", + } + df = DataFrame(types_data) + return df[dtypes.keys()].astype(dtypes) + + +@pytest.fixture +def test_frame1(): + columns = ["index", "A", "B", "C", "D"] + data = [ + ( + "2000-01-03 00:00:00", + 0.980268513777, + 3.68573087906, + -0.364216805298, + -1.15973806169, + ), + ( + "2000-01-04 00:00:00", + 1.04791624281, + -0.0412318367011, + -0.16181208307, + 0.212549316967, + ), + ( + "2000-01-05 00:00:00", + 0.498580885705, + 0.731167677815, + -0.537677223318, + 1.34627041952, + ), + ( + "2000-01-06 00:00:00", + 1.12020151869, + 1.56762092543, + 0.00364077397681, + 0.67525259227, + ), + ] + return DataFrame(data, columns=columns) + + +@pytest.fixture +def test_frame3(): + columns = ["index", "A", "B"] + data = [ + ("2000-01-03 00:00:00", 2**31 - 1, -1.987670), + ("2000-01-04 00:00:00", -29, -0.0412318367011), + ("2000-01-05 00:00:00", 20000, 0.731167677815), + ("2000-01-06 00:00:00", -290867, 1.56762092543), + ] + return DataFrame(data, columns=columns) + + +def get_all_views(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='view'") + return [view[0] for view in c.fetchall()] + else: + adbc = 
import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + results = [] + info = conn.adbc_get_objects().read_all().to_pylist() + for catalog in info: + catalog["catalog_name"] + for schema in catalog["catalog_db_schemas"]: + schema["db_schema_name"] + for table in schema["db_schema_tables"]: + if table["table_type"] == "view": + view_name = table["table_name"] + results.append(view_name) + + return results + else: + from sqlalchemy import inspect + + return inspect(conn).get_view_names() + + +def get_all_tables(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='table'") + return [table[0] for table in c.fetchall()] + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + + if adbc and isinstance(conn, adbc.Connection): + results = [] + info = conn.adbc_get_objects().read_all().to_pylist() + for catalog in info: + for schema in catalog["catalog_db_schemas"]: + for table in schema["db_schema_tables"]: + if table["table_type"] == "table": + table_name = table["table_name"] + results.append(table_name) + + return results + else: + from sqlalchemy import inspect + + return inspect(conn).get_table_names() + + +def drop_table( + table_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}") + conn.commit() + + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(f'DROP TABLE IF EXISTS "{table_name}"') + else: + with conn.begin() as con: + with sql.SQLDatabase(con) as db: + db.drop_table(table_name) + + +def drop_view( + view_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + import sqlalchemy + + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP VIEW IF EXISTS {sql._get_valid_sqlite_name(view_name)}") + conn.commit() + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(f'DROP VIEW IF EXISTS "{view_name}"') + else: + quoted_view = conn.engine.dialect.identifier_preparer.quote_identifier( + view_name + ) + stmt = sqlalchemy.text(f"DROP VIEW IF EXISTS {quoted_view}") + with conn.begin() as con: + con.execute(stmt) # type: ignore[union-attr] + + +@pytest.fixture +def mysql_pymysql_engine(): + sqlalchemy = pytest.importorskip("sqlalchemy") + pymysql = pytest.importorskip("pymysql") + engine = sqlalchemy.create_engine( + "mysql+pymysql://root@localhost:3306/pandas", + connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}, + poolclass=sqlalchemy.pool.NullPool, + ) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def mysql_pymysql_engine_iris(mysql_pymysql_engine, iris_path): + create_and_load_iris(mysql_pymysql_engine, iris_path) + create_and_load_iris_view(mysql_pymysql_engine) + yield mysql_pymysql_engine + + +@pytest.fixture +def mysql_pymysql_engine_types(mysql_pymysql_engine, types_data): + create_and_load_types(mysql_pymysql_engine, types_data, "mysql") + yield mysql_pymysql_engine + + +@pytest.fixture +def 
mysql_pymysql_conn(mysql_pymysql_engine): + with mysql_pymysql_engine.connect() as conn: + yield conn + + +@pytest.fixture +def mysql_pymysql_conn_iris(mysql_pymysql_engine_iris): + with mysql_pymysql_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def mysql_pymysql_conn_types(mysql_pymysql_engine_types): + with mysql_pymysql_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_engine(): + sqlalchemy = pytest.importorskip("sqlalchemy") + pytest.importorskip("psycopg2") + engine = sqlalchemy.create_engine( + "postgresql+psycopg2://postgres:postgres@localhost:5432/pandas", + poolclass=sqlalchemy.pool.NullPool, + ) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def postgresql_psycopg2_engine_iris(postgresql_psycopg2_engine, iris_path): + create_and_load_iris(postgresql_psycopg2_engine, iris_path) + create_and_load_iris_view(postgresql_psycopg2_engine) + yield postgresql_psycopg2_engine + + +@pytest.fixture +def postgresql_psycopg2_engine_types(postgresql_psycopg2_engine, types_data): + create_and_load_types(postgresql_psycopg2_engine, types_data, "postgres") + yield postgresql_psycopg2_engine + + +@pytest.fixture +def postgresql_psycopg2_conn(postgresql_psycopg2_engine): + with postgresql_psycopg2_engine.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_adbc_conn(): + pytest.importorskip("adbc_driver_postgresql") + from adbc_driver_postgresql import dbapi + + uri = "postgresql://postgres:postgres@localhost:5432/pandas" + with dbapi.connect(uri) as conn: + yield conn + for view in get_all_views(conn): + drop_view(view, conn) + for tbl in get_all_tables(conn): + drop_table(tbl, conn) + conn.commit() + + +@pytest.fixture +def postgresql_adbc_iris(postgresql_adbc_conn, iris_path): + import adbc_driver_manager as mgr + + conn = postgresql_adbc_conn + + try: + conn.adbc_get_table_schema("iris") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_postgresql(conn, iris_path) + try: + conn.adbc_get_table_schema("iris_view") + except mgr.ProgrammingError: # note arrow-adbc issue 1022 + conn.rollback() + create_and_load_iris_view(conn) + yield conn + + +@pytest.fixture +def postgresql_adbc_types(postgresql_adbc_conn, types_data): + import adbc_driver_manager as mgr + + conn = postgresql_adbc_conn + + try: + conn.adbc_get_table_schema("types") + except mgr.ProgrammingError: + conn.rollback() + new_data = [tuple(entry.values()) for entry in types_data] + + create_and_load_types_postgresql(conn, new_data) + + yield conn + + +@pytest.fixture +def postgresql_psycopg2_conn_iris(postgresql_psycopg2_engine_iris): + with postgresql_psycopg2_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types): + with postgresql_psycopg2_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str(): + pytest.importorskip("sqlalchemy") + with tm.ensure_clean() as name: + yield f"sqlite:///{name}" + + +@pytest.fixture +def sqlite_engine(sqlite_str): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def sqlite_conn(sqlite_engine): + with 
sqlite_engine.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str_iris(sqlite_str, iris_path): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_iris(engine, iris_path) + create_and_load_iris_view(engine) + engine.dispose() + return sqlite_str + + +@pytest.fixture +def sqlite_engine_iris(sqlite_engine, iris_path): + create_and_load_iris(sqlite_engine, iris_path) + create_and_load_iris_view(sqlite_engine) + yield sqlite_engine + + +@pytest.fixture +def sqlite_conn_iris(sqlite_engine_iris): + with sqlite_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str_types(sqlite_str, types_data): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_types(engine, types_data, "sqlite") + engine.dispose() + return sqlite_str + + +@pytest.fixture +def sqlite_engine_types(sqlite_engine, types_data): + create_and_load_types(sqlite_engine, types_data, "sqlite") + yield sqlite_engine + + +@pytest.fixture +def sqlite_conn_types(sqlite_engine_types): + with sqlite_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_adbc_conn(): + pytest.importorskip("adbc_driver_sqlite") + from adbc_driver_sqlite import dbapi + + with tm.ensure_clean() as name: + uri = f"file:{name}" + with dbapi.connect(uri) as conn: + yield conn + for view in get_all_views(conn): + drop_view(view, conn) + for tbl in get_all_tables(conn): + drop_table(tbl, conn) + conn.commit() + + +@pytest.fixture +def sqlite_adbc_iris(sqlite_adbc_conn, iris_path): + import adbc_driver_manager as mgr + + conn = sqlite_adbc_conn + try: + conn.adbc_get_table_schema("iris") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_sqlite3(conn, iris_path) + try: + conn.adbc_get_table_schema("iris_view") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_view(conn) + yield conn + + +@pytest.fixture +def sqlite_adbc_types(sqlite_adbc_conn, types_data): + import adbc_driver_manager as mgr + + conn = sqlite_adbc_conn + try: + conn.adbc_get_table_schema("types") + except mgr.ProgrammingError: + conn.rollback() + new_data = [] + for entry in types_data: + entry["BoolCol"] = int(entry["BoolCol"]) + if entry["BoolColWithNull"] is not None: + entry["BoolColWithNull"] = int(entry["BoolColWithNull"]) + new_data.append(tuple(entry.values())) + + create_and_load_types_sqlite3(conn, new_data) + conn.commit() + + yield conn + + +@pytest.fixture +def sqlite_buildin(): + with contextlib.closing(sqlite3.connect(":memory:")) as closing_conn: + with closing_conn as conn: + yield conn + + +@pytest.fixture +def sqlite_buildin_iris(sqlite_buildin, iris_path): + create_and_load_iris_sqlite3(sqlite_buildin, iris_path) + create_and_load_iris_view(sqlite_buildin) + yield sqlite_buildin + + +@pytest.fixture +def sqlite_buildin_types(sqlite_buildin, types_data): + types_data = [tuple(entry.values()) for entry in types_data] + create_and_load_types_sqlite3(sqlite_buildin, types_data) + yield sqlite_buildin + + +mysql_connectable = [ + pytest.param("mysql_pymysql_engine", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn", marks=pytest.mark.db), +] + +mysql_connectable_iris = [ + pytest.param("mysql_pymysql_engine_iris", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_iris", marks=pytest.mark.db), +] + +mysql_connectable_types = [ + pytest.param("mysql_pymysql_engine_types", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_types", 
marks=pytest.mark.db), +] + +postgresql_connectable = [ + pytest.param("postgresql_psycopg2_engine", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn", marks=pytest.mark.db), +] + +postgresql_connectable_iris = [ + pytest.param("postgresql_psycopg2_engine_iris", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_iris", marks=pytest.mark.db), +] + +postgresql_connectable_types = [ + pytest.param("postgresql_psycopg2_engine_types", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_types", marks=pytest.mark.db), +] + +sqlite_connectable = [ + "sqlite_engine", + "sqlite_conn", + "sqlite_str", +] + +sqlite_connectable_iris = [ + "sqlite_engine_iris", + "sqlite_conn_iris", + "sqlite_str_iris", +] + +sqlite_connectable_types = [ + "sqlite_engine_types", + "sqlite_conn_types", + "sqlite_str_types", +] + +sqlalchemy_connectable = mysql_connectable + postgresql_connectable + sqlite_connectable + +sqlalchemy_connectable_iris = ( + mysql_connectable_iris + postgresql_connectable_iris + sqlite_connectable_iris +) + +sqlalchemy_connectable_types = ( + mysql_connectable_types + postgresql_connectable_types + sqlite_connectable_types +) + +adbc_connectable = [ + "sqlite_adbc_conn", + pytest.param("postgresql_adbc_conn", marks=pytest.mark.db), +] + +adbc_connectable_iris = [ + pytest.param("postgresql_adbc_iris", marks=pytest.mark.db), + pytest.param("sqlite_adbc_iris", marks=pytest.mark.db), +] + +adbc_connectable_types = [ + pytest.param("postgresql_adbc_types", marks=pytest.mark.db), + pytest.param("sqlite_adbc_types", marks=pytest.mark.db), +] + + +all_connectable = sqlalchemy_connectable + ["sqlite_buildin"] + adbc_connectable + +all_connectable_iris = ( + sqlalchemy_connectable_iris + ["sqlite_buildin_iris"] + adbc_connectable_iris +) + +all_connectable_types = ( + sqlalchemy_connectable_types + ["sqlite_buildin_types"] + adbc_connectable_types +) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql(conn, test_frame1, request): + # GH 51086 if conn is sqlite_engine + conn = request.getfixturevalue(conn) + test_frame1.to_sql(name="test", con=conn, if_exists="append", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_empty(conn, test_frame1, request): + if conn == "postgresql_adbc_conn": + request.node.add_marker( + pytest.mark.xfail( + reason="postgres ADBC driver cannot insert index with null type", + strict=True, + ) + ) + # GH 51086 if conn is sqlite_engine + conn = request.getfixturevalue(conn) + empty_df = test_frame1.iloc[:0] + empty_df.to_sql(name="test", con=conn, if_exists="append", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes(conn, request): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "int": pd.array([1], dtype="int8[pyarrow]"), + "datetime": pd.array( + [datetime(2023, 1, 1)], dtype="timestamp[ns][pyarrow]" + ), + "date": pd.array([date(2023, 1, 1)], dtype="date32[day][pyarrow]"), + "timedelta": pd.array([timedelta(1)], dtype="duration[ns][pyarrow]"), + "string": pd.array(["a"], dtype="string[pyarrow]"), + } + ) + + if "adbc" in conn: + if conn == "sqlite_adbc_conn": + df = df.drop(columns=["timedelta"]) + if pa_version_under14p1: + exp_warning = DeprecationWarning + msg = "is_sparse is deprecated" + else: + exp_warning = None + msg = "" + else: + exp_warning = UserWarning + msg = "the 'timedelta'" + + conn = request.getfixturevalue(conn) + with tm.assert_produces_warning(exp_warning, 
match=msg, check_stacklevel=False): + df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "datetime": pd.array( + [datetime(2023, 1, 1), nulls_fixture], dtype="timestamp[ns][pyarrow]" + ), + } + ) + conn = request.getfixturevalue(conn) + df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("method", [None, "multi"]) +def test_to_sql(conn, method, test_frame1, request): + if method == "multi" and "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'method' not implemented for ADBC drivers", strict=True + ) + ) + + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", method=method) + assert pandasSQL.has_table("test_frame") + assert count_rows(conn, "test_frame") == len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("mode, num_row_coef", [("replace", 1), ("append", 2)]) +def test_to_sql_exist(conn, mode, num_row_coef, test_frame1, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + pandasSQL.to_sql(test_frame1, "test_frame", if_exists=mode) + assert pandasSQL.has_table("test_frame") + assert count_rows(conn, "test_frame") == num_row_coef * len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_exist_fail(conn, test_frame1, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + assert pandasSQL.has_table("test_frame") + + msg = "Table 'test_frame' already exists" + with pytest.raises(ValueError, match=msg): + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = read_sql_query("SELECT * FROM iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("SELECT * FROM iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("SELECT * FROM iris where 0=1", conn) + assert iris_frame.shape == (0, 5) + assert "SepalWidth" in iris_frame.columns + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query_chunksize(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + iris_frame = concat(read_sql_query("SELECT * FROM iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("SELECT * FROM iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("SELECT * FROM iris where 0=1", conn, chunksize=7)) + assert iris_frame.shape == (0, 5) + assert "SepalWidth" in iris_frame.columns + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_query_expression_with_parameter(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not 
implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + from sqlalchemy import ( + MetaData, + Table, + create_engine, + select, + ) + + metadata = MetaData() + autoload_con = create_engine(conn) if isinstance(conn, str) else conn + iris = Table("iris", metadata, autoload_with=autoload_con) + iris_frame = read_sql_query( + select(iris), conn, params={"name": "Iris-setosa", "length": 5.1} + ) + check_iris_frame(iris_frame) + if isinstance(conn, str): + autoload_con.dispose() + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query_string_with_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not implemented for ADBC drivers", + strict=True, + ) + ) + + for db, query in sql_strings["read_parameters"].items(): + if db in conn: + break + else: + raise KeyError(f"No part of {conn} found in sql_strings['read_parameters']") + conn = request.getfixturevalue(conn) + iris_frame = read_sql_query(query, conn, params=("Iris-setosa", 5.1)) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_table(conn, request): + # GH 51015 if conn = sqlite_iris_str + conn = request.getfixturevalue(conn) + iris_frame = read_sql_table("iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("iris", conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_table_chunksize(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + iris_frame = concat(read_sql_table("iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_to_sql_callable(conn, test_frame1, request): + conn = request.getfixturevalue(conn) + + check = [] # used to double check function below is really being used + + def sample(pd_table, conn, keys, data_iter): + check.append(1) + data = [dict(zip(keys, row)) for row in data_iter] + conn.execute(pd_table.table.insert(), data) + + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", method=sample) + assert pandasSQL.has_table("test_frame") + assert check == [1] + assert count_rows(conn, "test_frame") == len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_buildin_types": + request.applymarker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.FloatCol.dtype.type, np.floating) + assert issubclass(df.IntCol.dtype.type, np.integer) + + # MySQL/sqlite has no real BOOL type + if "postgresql" in conn_name: + assert issubclass(df.BoolCol.dtype.type, np.bool_) + else: + assert issubclass(df.BoolCol.dtype.type, np.integer) + + # Int column with NA values stays as float + assert issubclass(df.IntColWithNull.dtype.type, np.floating) + + # Bool column with NA = int column with NA values => becomes float + if "postgresql" in conn_name: + assert issubclass(df.BoolColWithNull.dtype.type, object) + else: + assert 
issubclass(df.BoolColWithNull.dtype.type, np.floating) + + +@pytest.mark.parametrize("conn", mysql_connectable) +def test_read_procedure(conn, request): + conn = request.getfixturevalue(conn) + + # GH 7324 + # Although it is more an api test, it is added to the + # mysql tests as sqlite does not have stored procedures + from sqlalchemy import text + from sqlalchemy.engine import Engine + + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df.to_sql(name="test_frame", con=conn, index=False) + + proc = """DROP PROCEDURE IF EXISTS get_testdb; + + CREATE PROCEDURE get_testdb () + + BEGIN + SELECT * FROM test_frame; + END""" + proc = text(proc) + if isinstance(conn, Engine): + with conn.connect() as engine_conn: + with engine_conn.begin(): + engine_conn.execute(proc) + else: + with conn.begin(): + conn.execute(proc) + + res1 = sql.read_sql_query("CALL get_testdb();", conn) + tm.assert_frame_equal(df, res1) + + # test delegation to read_sql_query + res2 = sql.read_sql("CALL get_testdb();", conn) + tm.assert_frame_equal(df, res2) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +@pytest.mark.parametrize("expected_count", [2, "Success!"]) +def test_copy_from_callable_insertion_method(conn, expected_count, request): + # GH 8953 + # Example in io.rst found under _io.sql.method + # not available in sqlite, mysql + def psql_insert_copy(table, conn, keys, data_iter): + # gets a DBAPI connection that can provide a cursor + dbapi_conn = conn.connection + with dbapi_conn.cursor() as cur: + s_buf = StringIO() + writer = csv.writer(s_buf) + writer.writerows(data_iter) + s_buf.seek(0) + + columns = ", ".join([f'"{k}"' for k in keys]) + if table.schema: + table_name = f"{table.schema}.{table.name}" + else: + table_name = table.name + + sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV" + cur.copy_expert(sql=sql_query, file=s_buf) + return expected_count + + conn = request.getfixturevalue(conn) + expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) + result_count = expected.to_sql( + name="test_frame", con=conn, index=False, method=psql_insert_copy + ) + # GH 46891 + if expected_count is None: + assert result_count is None + else: + assert result_count == expected_count + result = sql.read_sql_table("test_frame", conn) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_insertion_method_on_conflict_do_nothing(conn, request): + # GH 15988: Example in to_sql docstring + conn = request.getfixturevalue(conn) + + from sqlalchemy.dialects.postgresql import insert + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + def insert_on_conflict(table, conn, keys, data_iter): + data = [dict(zip(keys, row)) for row in data_iter] + stmt = ( + insert(table.table) + .values(data) + .on_conflict_do_nothing(index_elements=["a"]) + ) + result = conn.execute(stmt) + return result.rowcount + + create_sql = text( + """ + CREATE TABLE test_insert_conflict ( + a integer PRIMARY KEY, + b numeric, + c text + ); + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(create_sql) + else: + with conn.begin(): + conn.execute(create_sql) + + expected = DataFrame([[1, 2.1, "a"]], columns=list("abc")) + expected.to_sql( + name="test_insert_conflict", con=conn, if_exists="append", index=False + ) + + df_insert = DataFrame([[1, 3.2, "b"]], columns=list("abc")) + inserted = df_insert.to_sql( + name="test_insert_conflict", + con=conn, + index=False, + 
if_exists="append", + method=insert_on_conflict, + ) + result = sql.read_sql_table("test_insert_conflict", conn) + tm.assert_frame_equal(result, expected) + assert inserted == 0 + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_insert_conflict") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_on_public_schema(conn, request): + if "sqlite" in conn or "mysql" in conn: + request.applymarker( + pytest.mark.xfail( + reason="test for public schema only specific to postgresql" + ) + ) + + conn = request.getfixturevalue(conn) + + test_data = DataFrame([[1, 2.1, "a"], [2, 3.1, "b"]], columns=list("abc")) + test_data.to_sql( + name="test_public_schema", + con=conn, + if_exists="append", + index=False, + schema="public", + ) + + df_out = sql.read_sql_table("test_public_schema", conn, schema="public") + tm.assert_frame_equal(test_data, df_out) + + +@pytest.mark.parametrize("conn", mysql_connectable) +def test_insertion_method_on_conflict_update(conn, request): + # GH 14553: Example in to_sql docstring + conn = request.getfixturevalue(conn) + + from sqlalchemy.dialects.mysql import insert + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + def insert_on_conflict(table, conn, keys, data_iter): + data = [dict(zip(keys, row)) for row in data_iter] + stmt = insert(table.table).values(data) + stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c) + result = conn.execute(stmt) + return result.rowcount + + create_sql = text( + """ + CREATE TABLE test_insert_conflict ( + a INT PRIMARY KEY, + b FLOAT, + c VARCHAR(10) + ); + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(create_sql) + else: + with conn.begin(): + conn.execute(create_sql) + + df = DataFrame([[1, 2.1, "a"]], columns=list("abc")) + df.to_sql(name="test_insert_conflict", con=conn, if_exists="append", index=False) + + expected = DataFrame([[1, 3.2, "b"]], columns=list("abc")) + inserted = expected.to_sql( + name="test_insert_conflict", + con=conn, + index=False, + if_exists="append", + method=insert_on_conflict, + ) + result = sql.read_sql_table("test_insert_conflict", conn) + tm.assert_frame_equal(result, expected) + assert inserted == 2 + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_insert_conflict") + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_read_view_postgres(conn, request): + # GH 52969 + conn = request.getfixturevalue(conn) + + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + table_name = f"group_{uuid.uuid4().hex}" + view_name = f"group_view_{uuid.uuid4().hex}" + + sql_stmt = text( + f""" + CREATE TABLE {table_name} ( + group_id INTEGER, + name TEXT + ); + INSERT INTO {table_name} VALUES + (1, 'name'); + CREATE VIEW {view_name} + AS + SELECT * FROM {table_name}; + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(sql_stmt) + else: + with conn.begin(): + conn.execute(sql_stmt) + result = read_sql_table(view_name, conn) + expected = DataFrame({"group_id": [1], "name": "name"}) + tm.assert_frame_equal(result, expected) + + +def test_read_view_sqlite(sqlite_buildin): + # GH 52969 + create_table = """ +CREATE TABLE groups ( + group_id INTEGER, + name TEXT +); +""" + insert_into = """ +INSERT INTO groups VALUES + (1, 'name'); +""" + create_view = """ +CREATE VIEW group_view +AS +SELECT * FROM groups; 
+""" + sqlite_buildin.execute(create_table) + sqlite_buildin.execute(insert_into) + sqlite_buildin.execute(create_view) + result = pd.read_sql("SELECT * FROM group_view", sqlite_buildin) + expected = DataFrame({"group_id": [1], "name": "name"}) + tm.assert_frame_equal(result, expected) + + +def test_execute_typeerror(sqlite_engine_iris): + with pytest.raises(TypeError, match="pandas.io.sql.execute requires a connection"): + with tm.assert_produces_warning( + FutureWarning, + match="`pandas.io.sql.execute` is deprecated and " + "will be removed in the future version.", + ): + sql.execute("select * from iris", sqlite_engine_iris) + + +def test_execute_deprecated(sqlite_conn_iris): + # GH50185 + with tm.assert_produces_warning( + FutureWarning, + match="`pandas.io.sql.execute` is deprecated and " + "will be removed in the future version.", + ): + sql.execute("select * from iris", sqlite_conn_iris) + + +def flavor(conn_name): + if "postgresql" in conn_name: + return "postgresql" + elif "sqlite" in conn_name: + return "sqlite" + elif "mysql" in conn_name: + return "mysql" + + raise ValueError(f"unsupported connection: {conn_name}") + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'params' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_parameters"][flavor(conn_name)] + params = ("Iris-setosa", 5.1) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_named_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'params' not implemented for ADBC drivers", + strict=True, + ) + ) + + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_named_parameters"][flavor(conn_name)] + params = {"name": "Iris-setosa", "length": 5.1} + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_no_parameter_with_percent(conn, request, sql_strings): + if "mysql" in conn or ("postgresql" in conn and "adbc" not in conn): + request.applymarker(pytest.mark.xfail(reason="broken test")) + + conn_name = conn + conn = request.getfixturevalue(conn) + + query = sql_strings["read_no_parameters_with_percent"][flavor(conn_name)] + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=None) + check_iris_frame(iris_frame) + + +# ----------------------------------------------------------------------------- +# -- Testing the public API + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_view(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_query("SELECT * FROM iris_view", conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_with_chunksize_no_result(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = 
request.getfixturevalue(conn) + query = 'SELECT * FROM iris_view WHERE "SepalLength" < 0.0' + with_batch = sql.read_sql_query(query, conn, chunksize=5) + without_batch = sql.read_sql_query(query, conn) + tm.assert_frame_equal(concat(with_batch), without_batch) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame1", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame1") + + sql.to_sql(test_frame1, "test_frame1", conn) + assert sql.has_table("test_frame1", conn) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_fail(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame2", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame2") + + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") + assert sql.has_table("test_frame2", conn) + + msg = "Table 'test_frame2' already exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_replace(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame3", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame3") + + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="fail") + # Add to table again + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="replace") + assert sql.has_table("test_frame3", conn) + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame3") + + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_append(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame4", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame4") + + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="fail") == 4 + + # Add to table again + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="append") == 4 + assert sql.has_table("test_frame4", conn) + + num_entries = 2 * len(test_frame1) + num_rows = count_rows(conn, "test_frame4") + + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_type_mapping(conn, request, test_frame3): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame5", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame5") + + sql.to_sql(test_frame3, "test_frame5", conn, index=False) + result = sql.read_sql("SELECT * FROM test_frame5", conn) + + tm.assert_frame_equal(test_frame3, result) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_series(conn, request): + conn = request.getfixturevalue(conn) + if sql.has_table("test_series", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_series") + + s = Series(np.arange(5, dtype="int64"), name="series") + sql.to_sql(s, "test_series", conn, index=False) + s2 = sql.read_sql_query("SELECT * FROM test_series", conn) + tm.assert_frame_equal(s.to_frame(), s2) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip(conn, request, test_frame1): + conn_name = conn + 
conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql(test_frame1, "test_frame_roundtrip", con=conn) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + + # HACK! + if "adbc" in conn_name: + result = result.rename(columns={"__index_level_0__": "level_0"}) + result.index = test_frame1.index + result.set_index("level_0", inplace=True) + result.index.astype(int) + result.index.name = None + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip_chunksize(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql( + test_frame1, + "test_frame_roundtrip", + con=conn, + index=False, + chunksize=2, + ) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_execute_sql(conn, request): + # drop_sql = "DROP TABLE IF EXISTS test" # should already be done + conn = request.getfixturevalue(conn) + with sql.pandasSQL_builder(conn) as pandas_sql: + iris_results = pandas_sql.execute("SELECT * FROM iris") + row = iris_results.fetchone() + iris_results.close() + assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"] + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_api_date_parsing(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + # Test date parsing in read_sql + # No Parsing + df = sql.read_sql_query("SELECT * FROM types", conn) + if not ("mysql" in conn_name or "postgres" in conn_name): + assert not issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] + + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", conn, parse_dates={"IntDateCol": "s"} + ) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"IntDateOnlyCol": "%Y%m%d"}, + ) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + Timestamp("2010-10-10"), + Timestamp("2010-12-12"), + ] + + +@pytest.mark.parametrize("conn", all_connectable_types) +@pytest.mark.parametrize("error", ["ignore", "raise", 
"coerce"]) +@pytest.mark.parametrize( + "read_sql, text, mode", + [ + (sql.read_sql, "SELECT * FROM types", ("sqlalchemy", "fallback")), + (sql.read_sql, "types", ("sqlalchemy")), + ( + sql.read_sql_query, + "SELECT * FROM types", + ("sqlalchemy", "fallback"), + ), + (sql.read_sql_table, "types", ("sqlalchemy")), + ], +) +def test_api_custom_dateparsing_error( + conn, request, read_sql, text, mode, error, types_data_frame +): + conn_name = conn + conn = request.getfixturevalue(conn) + if text == "types" and conn_name == "sqlite_buildin_types": + request.applymarker( + pytest.mark.xfail(reason="failing combination of arguments") + ) + + expected = types_data_frame.astype({"DateCol": "datetime64[ns]"}) + + result = read_sql( + text, + con=conn, + parse_dates={ + "DateCol": {"errors": error}, + }, + ) + if "postgres" in conn_name: + # TODO: clean up types_data_frame fixture + result["BoolCol"] = result["BoolCol"].astype(int) + result["BoolColWithNull"] = result["BoolColWithNull"].astype(float) + + if conn_name == "postgresql_adbc_types": + expected = expected.astype( + { + "IntDateCol": "int32", + "IntDateOnlyCol": "int32", + "IntCol": "int32", + } + ) + + if not pa_version_under13p0: + # TODO: is this astype safe? + expected["DateCol"] = expected["DateCol"].astype("datetime64[us]") + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_api_date_and_index(conn, request): + # Test case where same column appears in parse_date and index_col + conn = request.getfixturevalue(conn) + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + index_col="DateCol", + parse_dates=["DateCol", "IntDateCol"], + ) + + assert issubclass(df.index.dtype.type, np.datetime64) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_timedelta(conn, request): + # see #6921 + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_timedelta", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_timedelta") + + df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame() + + if conn_name == "sqlite_adbc_conn": + request.node.add_marker( + pytest.mark.xfail( + reason="sqlite ADBC driver doesn't implement timedelta", + ) + ) + + if "adbc" in conn_name: + if pa_version_under14p1: + exp_warning = DeprecationWarning + else: + exp_warning = None + else: + exp_warning = UserWarning + + with tm.assert_produces_warning(exp_warning, check_stacklevel=False): + result_count = df.to_sql(name="test_timedelta", con=conn) + assert result_count == 2 + result = sql.read_sql_query("SELECT * FROM test_timedelta", conn) + + if conn_name == "postgresql_adbc_conn": + # TODO: Postgres stores an INTERVAL, which ADBC reads as a Month-Day-Nano + # Interval; the default pandas type mapper maps this to a DateOffset + # but maybe we should try and restore the timedelta here? 
+ expected = Series( + [ + pd.DateOffset(months=0, days=0, microseconds=1000000, nanoseconds=0), + pd.DateOffset(months=0, days=0, microseconds=3000000, nanoseconds=0), + ], + name="foo", + ) + else: + expected = df["foo"].astype("int64") + tm.assert_series_equal(result["foo"], expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_complex_raises(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame({"a": [1 + 1j, 2j]}) + + if "adbc" in conn_name: + msg = "datatypes not supported" + else: + msg = "Complex datatypes not supported" + with pytest.raises(ValueError, match=msg): + assert df.to_sql("test_complex", con=conn) is None + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize( + "index_name,index_label,expected", + [ + # no index name, defaults to 'index' + (None, None, "index"), + # specifying index_label + (None, "other_label", "other_label"), + # using the index name + ("index_name", None, "index_name"), + # has index name, but specifying index_label + ("index_name", "other_label", "other_label"), + # index name is integer + (0, None, "0"), + # index name is None but index label is integer + (None, 0, "0"), + ], +) +def test_api_to_sql_index_label(conn, request, index_name, index_label, expected): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="index_label argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_index_label", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_index_label") + + temp_frame = DataFrame({"col1": range(4)}) + temp_frame.index.name = index_name + query = "SELECT * FROM test_index_label" + sql.to_sql(temp_frame, "test_index_label", conn, index_label=index_label) + frame = sql.read_sql_query(query, conn) + assert frame.columns[0] == expected + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_index_label_multiindex(conn, request): + conn_name = conn + if "mysql" in conn_name: + request.applymarker( + pytest.mark.xfail( + reason="MySQL can fail using TEXT without length as key", strict=False + ) + ) + elif "adbc" in conn_name: + request.node.add_marker( + pytest.mark.xfail(reason="index_label argument NotImplemented with ADBC") + ) + + conn = request.getfixturevalue(conn) + if sql.has_table("test_index_label", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_index_label") + + expected_row_count = 4 + temp_frame = DataFrame( + {"col1": range(4)}, + index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]), + ) + + # no index name, defaults to 'level_0' and 'level_1' + result = sql.to_sql(temp_frame, "test_index_label", conn) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[0] == "level_0" + assert frame.columns[1] == "level_1" + + # specifying index_label + result = sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label=["A", "B"], + ) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[:2].tolist() == ["A", "B"] + + # using the index name + temp_frame.index.names = ["A", "B"] + result = sql.to_sql(temp_frame, "test_index_label", conn, if_exists="replace") + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert 
frame.columns[:2].tolist() == ["A", "B"] + + # has index name, but specifying index_label + result = sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label=["C", "D"], + ) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[:2].tolist() == ["C", "D"] + + msg = "Length of 'index_label' should match number of levels, which is 2" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label="C", + ) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_multiindex_roundtrip(conn, request): + conn = request.getfixturevalue(conn) + if sql.has_table("test_multiindex_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_multiindex_roundtrip") + + df = DataFrame.from_records( + [(1, 2.1, "line1"), (2, 1.5, "line2")], + columns=["A", "B", "C"], + index=["A", "B"], + ) + + df.to_sql(name="test_multiindex_roundtrip", con=conn) + result = sql.read_sql_query( + "SELECT * FROM test_multiindex_roundtrip", conn, index_col=["A", "B"] + ) + tm.assert_frame_equal(df, result, check_index_type=True) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize( + "dtype", + [ + None, + int, + float, + {"A": int, "B": float}, + ], +) +def test_api_dtype_argument(conn, request, dtype): + # GH10285 Add dtype argument to read_sql_query + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_dtype_argument", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_dtype_argument") + + df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"]) + assert df.to_sql(name="test_dtype_argument", con=conn) == 2 + + expected = df.astype(dtype) + + if "postgres" in conn_name: + query = 'SELECT "A", "B" FROM test_dtype_argument' + else: + query = "SELECT A, B FROM test_dtype_argument" + result = sql.read_sql_query(query, con=conn, dtype=dtype) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_integer_col_names(conn, request): + conn = request.getfixturevalue(conn) + df = DataFrame([[1, 2], [3, 4]], columns=[0, 1]) + sql.to_sql(df, "test_frame_integer_col_names", conn, if_exists="replace") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + create_sql = sql.get_schema(test_frame1, "test", con=conn) + assert "CREATE" in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_with_schema(conn, request, test_frame1): + # GH28486 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + create_sql = sql.get_schema(test_frame1, "test", con=conn, schema="pypi") + assert "CREATE TABLE pypi." 
in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_dtypes(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]}) + + if conn_name == "sqlite_buildin": + dtype = "INTEGER" + else: + from sqlalchemy import Integer + + dtype = Integer + create_sql = sql.get_schema(float_frame, "test", con=conn, dtype={"b": dtype}) + assert "CREATE" in create_sql + assert "INTEGER" in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_keys(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]}) + create_sql = sql.get_schema(frame, "test", con=conn, keys="Col1") + + if "mysql" in conn_name: + constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`Col1`)" + else: + constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")' + assert constraint_sentence in create_sql + + # multiple columns as key (GH10385) + create_sql = sql.get_schema(test_frame1, "test", con=conn, keys=["A", "B"]) + if "mysql" in conn_name: + constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`A`, `B`)" + else: + constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")' + assert constraint_sentence in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_chunksize_read(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_chunksize", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_chunksize") + + df = DataFrame( + np.random.default_rng(2).standard_normal((22, 5)), columns=list("abcde") + ) + df.to_sql(name="test_chunksize", con=conn, index=False) + + # reading the query all at once + res1 = sql.read_sql_query("select * from test_chunksize", conn) + + # reading the query in chunks with read_sql_query + res2 = DataFrame() + i = 0 + sizes = [5, 5, 5, 5, 2] + + for chunk in sql.read_sql_query("select * from test_chunksize", conn, chunksize=5): + res2 = concat([res2, chunk], ignore_index=True) + assert len(chunk) == sizes[i] + i += 1 + + tm.assert_frame_equal(res1, res2) + + # reading the table in chunks with read_sql_table + if conn_name == "sqlite_buildin": + with pytest.raises(NotImplementedError, match=""): + sql.read_sql_table("test_chunksize", conn, chunksize=5) + else: + res3 = DataFrame() + i = 0 + sizes = [5, 5, 5, 5, 2] + + for chunk in sql.read_sql_table("test_chunksize", conn, chunksize=5): + res3 = concat([res3, chunk], ignore_index=True) + assert len(chunk) == sizes[i] + i += 1 + + tm.assert_frame_equal(res1, res3) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_categorical(conn, request): + if conn == "postgresql_adbc_conn": + adbc = import_optional_dependency("adbc_driver_postgresql", errors="ignore") + if adbc is not None and Version(adbc.__version__) < Version("0.9.0"): + request.node.add_marker( + pytest.mark.xfail( + reason="categorical dtype not implemented for ADBC postgres driver", + strict=True,
+ ) + ) + # GH8624 + # test that categorical gets written correctly as dense column + conn = request.getfixturevalue(conn) + if sql.has_table("test_categorical", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_categorical") + + df = DataFrame( + { + "person_id": [1, 2, 3], + "person_name": ["John P. Doe", "Jane Dove", "John P. Doe"], + } + ) + df2 = df.copy() + df2["person_name"] = df2["person_name"].astype("category") + + df2.to_sql(name="test_categorical", con=conn, index=False) + res = sql.read_sql_query("SELECT * FROM test_categorical", conn) + + tm.assert_frame_equal(res, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_unicode_column_name(conn, request): + # GH 11431 + conn = request.getfixturevalue(conn) + if sql.has_table("test_unicode", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_unicode") + + df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"]) + df.to_sql(name="test_unicode", con=conn, index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_escaped_table_name(conn, request): + # GH 13206 + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("d1187b08-4943-4c8d-a7f6", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("d1187b08-4943-4c8d-a7f6") + + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + df.to_sql(name="d1187b08-4943-4c8d-a7f6", con=conn, index=False) + + if "postgres" in conn_name: + query = 'SELECT * FROM "d1187b08-4943-4c8d-a7f6"' + else: + query = "SELECT * FROM `d1187b08-4943-4c8d-a7f6`" + res = sql.read_sql_query(query, conn) + + tm.assert_frame_equal(res, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_read_sql_duplicate_columns(conn, request): + # GH#53117 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="pyarrow->pandas throws ValueError", strict=True) + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_table", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_table") + + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": 1}) + df.to_sql(name="test_table", con=conn, index=False) + + result = pd.read_sql("SELECT a, b, a +1 as a, c FROM test_table", conn) + expected = DataFrame( + [[1, 0.1, 2, 1], [2, 0.2, 3, 1], [3, 0.3, 4, 1]], + columns=["a", "b", "a", "c"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_columns(conn, request, test_frame1): + # test columns argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) + + cols = ["A", "B"] + + result = sql.read_sql_table("test_frame", conn, columns=cols) + assert result.columns.tolist() == cols + + +@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_index_col(conn, request, test_frame1): + # test index_col argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) + + result = sql.read_sql_table("test_frame", conn, index_col="index") + assert result.index.names == ["index"] + + result = sql.read_sql_table("test_frame",
conn, index_col=["A", "B"]) + assert result.index.names == ["A", "B"] + + result = sql.read_sql_table( + "test_frame", conn, index_col=["A", "B"], columns=["C", "D"] + ) + assert result.index.names == ["A", "B"] + assert result.columns.tolist() == ["C", "D"] + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_delegate(conn, request): + if conn == "sqlite_buildin_iris": + request.applymarker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) + + conn = request.getfixturevalue(conn) + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + iris_frame1 = sql.read_sql_table("iris", conn) + iris_frame2 = sql.read_sql("iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + +def test_not_reflect_all_tables(sqlite_conn): + conn = sqlite_conn + from sqlalchemy import text + from sqlalchemy.engine import Engine + + # create invalid table + query_list = [ + text("CREATE TABLE invalid (x INTEGER, y UNKNOWN);"), + text("CREATE TABLE other_table (x INTEGER, y INTEGER);"), + ] + + for query in query_list: + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + conn.execute(query) + else: + with conn.begin(): + conn.execute(query) + + with tm.assert_produces_warning(None): + sql.read_sql_table("other_table", conn) + sql.read_sql_query("SELECT * FROM other_table", conn) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_warning_case_insensitive_table_name(conn, request, test_frame1): + conn_name = conn + if conn_name == "sqlite_buildin" or "adbc" in conn_name: + request.applymarker(pytest.mark.xfail(reason="Does not raise warning")) + + conn = request.getfixturevalue(conn) + # see gh-7815 + with tm.assert_produces_warning( + UserWarning, + match=( + r"The provided table name 'TABLE1' is not found exactly as such in " + r"the database after writing the table, possibly due to case " + r"sensitivity issues. Consider using lower case table names." + ), + ): + with sql.SQLDatabase(conn) as db: + db.check_case_sensitive("TABLE1", "") + + # Test that the warning is certainly NOT triggered in a normal case. 
+ with tm.assert_produces_warning(None): + test_frame1.to_sql(name="CaseSensitive", con=conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_sqlalchemy_type_mapping(conn, request): + conn = request.getfixturevalue(conn) + from sqlalchemy import TIMESTAMP + + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + df = DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + with sql.SQLDatabase(conn) as db: + table = sql.SQLTable("test_type", db, frame=df) + # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones + assert isinstance(table.table.c["time"].type, TIMESTAMP) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "integer, expected", + [ + ("int8", "SMALLINT"), + ("Int8", "SMALLINT"), + ("uint8", "SMALLINT"), + ("UInt8", "SMALLINT"), + ("int16", "SMALLINT"), + ("Int16", "SMALLINT"), + ("uint16", "INTEGER"), + ("UInt16", "INTEGER"), + ("int32", "INTEGER"), + ("Int32", "INTEGER"), + ("uint32", "BIGINT"), + ("UInt32", "BIGINT"), + ("int64", "BIGINT"), + ("Int64", "BIGINT"), + (int, "BIGINT" if np.dtype(int).name == "int64" else "INTEGER"), + ], +) +def test_sqlalchemy_integer_mapping(conn, request, integer, expected): + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + conn = request.getfixturevalue(conn) + df = DataFrame([0, 1], columns=["a"], dtype=integer) + with sql.SQLDatabase(conn) as db: + table = sql.SQLTable("test_type", db, frame=df) + + result = str(table.table.c.a.type) + assert result == expected + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize("integer", ["uint64", "UInt64"]) +def test_sqlalchemy_integer_overload_mapping(conn, request, integer): + conn = request.getfixturevalue(conn) + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + df = DataFrame([0, 1], columns=["a"], dtype=integer) + with sql.SQLDatabase(conn) as db: + with pytest.raises( + ValueError, match="Unsigned 64 bit integer datatype is not supported" + ): + sql.SQLTable("test_type", db, frame=df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_database_uri_string(conn, request, test_frame1): + pytest.importorskip("sqlalchemy") + conn = request.getfixturevalue(conn) + # Test read_sql and .to_sql method with a database URI (GH10654) + # db_uri = 'sqlite:///:memory:' # raises + # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near + # "iris": syntax error [SQL: 'iris'] + with tm.ensure_clean() as name: + db_uri = "sqlite:///" + name + table = "iris" + test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False) + test_frame2 = sql.read_sql(table, db_uri) + test_frame3 = sql.read_sql_table(table, db_uri) + query = "SELECT * FROM iris" + test_frame4 = sql.read_sql_query(query, db_uri) + tm.assert_frame_equal(test_frame1, test_frame2) + tm.assert_frame_equal(test_frame1, test_frame3) + tm.assert_frame_equal(test_frame1, test_frame4) + + +@td.skip_if_installed("pg8000") +@pytest.mark.parametrize("conn", all_connectable) +def test_pg8000_sqlalchemy_passthrough_error(conn, request): + pytest.importorskip("sqlalchemy") + conn = request.getfixturevalue(conn) + # using driver that will not be installed on CI to trigger error + # in sqlalchemy.create_engine -> test passing of this error to user + db_uri = "postgresql+pg8000://user:pass@host/dbname" + with pytest.raises(ImportError, match="pg8000"): + sql.read_sql("select * from table", db_uri) + + 
+@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_query_by_text_obj(conn, request): + # WIP : GH10846 + conn_name = conn + conn = request.getfixturevalue(conn) + from sqlalchemy import text + + if "postgres" in conn_name: + name_text = text('select * from iris where "Name"=:name') + else: + name_text = text("select * from iris where name=:name") + iris_df = sql.read_sql(name_text, conn, params={"name": "Iris-versicolor"}) + all_names = set(iris_df["Name"]) + assert all_names == {"Iris-versicolor"} + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_query_by_select_obj(conn, request): + conn = request.getfixturevalue(conn) + # WIP : GH10846 + from sqlalchemy import ( + bindparam, + select, + ) + + iris = iris_table_metadata() + name_select = select(iris).where(iris.c.Name == bindparam("name")) + iris_df = sql.read_sql(name_select, conn, params={"name": "Iris-setosa"}) + all_names = set(iris_df["Name"]) + assert all_names == {"Iris-setosa"} + + +@pytest.mark.parametrize("conn", all_connectable) +def test_column_with_percentage(conn, request): + # GH 37157 + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]}) + df.to_sql(name="test_column_percentage", con=conn, index=False) + + res = sql.read_sql_table("test_column_percentage", conn) + + tm.assert_frame_equal(res, df) + + +def test_sql_open_close(test_frame3): + # Test if the IO in the database still work if the connection closed + # between the writing and reading (as in many real situations). + + with tm.ensure_clean() as name: + with closing(sqlite3.connect(name)) as conn: + assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4 + + with closing(sqlite3.connect(name)) as conn: + result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn) + + tm.assert_frame_equal(test_frame3, result) + + +@td.skip_if_installed("sqlalchemy") +def test_con_string_import_error(): + conn = "mysql://root@localhost/pandas" + msg = "Using URI string without sqlalchemy installed" + with pytest.raises(ImportError, match=msg): + sql.read_sql("SELECT * FROM iris", conn) + + +@td.skip_if_installed("sqlalchemy") +def test_con_unknown_dbapi2_class_does_not_error_without_sql_alchemy_installed(): + class MockSqliteConnection: + def __init__(self, *args, **kwargs) -> None: + self.conn = sqlite3.Connection(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self.conn, name) + + def close(self): + self.conn.close() + + with contextlib.closing(MockSqliteConnection(":memory:")) as conn: + with tm.assert_produces_warning(UserWarning): + sql.read_sql("SELECT 1", conn) + + +def test_sqlite_read_sql_delegate(sqlite_buildin_iris): + conn = sqlite_buildin_iris + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + msg = "Execution failed on sql 'iris': near \"iris\": syntax error" + with pytest.raises(sql.DatabaseError, match=msg): + sql.read_sql("iris", conn) + + +def test_get_schema2(test_frame1): + # without providing a connection object (available for backwards comp) + create_sql = sql.get_schema(test_frame1, "test") + assert "CREATE" in create_sql + + +def test_sqlite_type_mapping(sqlite_buildin): + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + conn = sqlite_buildin + df = 
DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + db = sql.SQLiteDatabase(conn) + table = sql.SQLiteTable("test_type", db, frame=df) + schema = table.sql_schema() + for col in schema.split("\n"): + if col.split()[0].strip('"') == "time": + assert col.split()[1] == "TIMESTAMP" + + +# ----------------------------------------------------------------------------- +# -- Database flavor specific tests + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_create_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import inspect + + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 + + insp = inspect(conn) + assert insp.has_table("temp_frame") + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("temp_frame") + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_drop_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import inspect + + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 + + insp = inspect(conn) + assert insp.has_table("temp_frame") + + with pandasSQL.run_transaction(): + pandasSQL.drop_table("temp_frame") + try: + insp.clear_cache() # needed with SQLAlchemy 2.0, unavailable prior + except AttributeError: + pass + assert not insp.has_table("temp_frame") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_roundtrip(conn, request, test_frame1): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn_name = conn + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4 + result = pandasSQL.read_query("SELECT * FROM test_frame_roundtrip") + + if "adbc" in conn_name: + result = result.rename(columns={"__index_level_0__": "level_0"}) + result.set_index("level_0", inplace=True) + # result.index.astype(int) + + result.index.name = None + + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_execute_sql(conn, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_results = pandasSQL.execute("SELECT * FROM iris") + row = iris_results.fetchone() + iris_results.close() + assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"] + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table("iris", con=conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table_columns(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table( + "iris", con=conn, columns=["SepalLength", "SepalLength"] + ) + tm.assert_index_equal(iris_frame.columns, Index(["SepalLength", "SepalLength__1"])) + + 
+@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_table_absent_raises(conn, request): + conn = request.getfixturevalue(conn) + msg = "Table this_doesnt_exist not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("this_doesnt_exist", con=conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_sqlalchemy_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "mysql" in conn_name or "sqlite" in conn_name: + request.applymarker( + pytest.mark.xfail(reason="boolean dtype not inferred properly") + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.FloatCol.dtype.type, np.floating) + assert issubclass(df.IntCol.dtype.type, np.integer) + assert issubclass(df.BoolCol.dtype.type, np.bool_) + + # Int column with NA values stays as float + assert issubclass(df.IntColWithNull.dtype.type, np.floating) + # Bool column with NA values becomes object + assert issubclass(df.BoolColWithNull.dtype.type, object) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_bigint(conn, request): + # int64 should be converted to BigInteger, GH7433 + conn = request.getfixturevalue(conn) + df = DataFrame(data={"i64": [2**62]}) + assert df.to_sql(name="test_bigint", con=conn, index=False) == 1 + result = sql.read_sql_table("test_bigint", conn) + + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_default_date_load(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "sqlite" in conn_name: + request.applymarker( + pytest.mark.xfail(reason="sqlite does not read date properly") + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +@pytest.mark.parametrize("parse_dates", [None, ["DateColWithTz"]]) +def test_datetime_with_timezone_query(conn, request, parse_dates): + # edge case that converts postgresql datetime with time zone types + # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok + # but should be more natural, so coerce to datetime64[ns] for now + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + + # GH11216 + df = read_sql_query("select * from datetz", conn, parse_dates=parse_dates) + col = df.DateColWithTz + tm.assert_series_equal(col, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_query_chunksize(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + + df = concat( + list(read_sql_query("select * from datetz", conn, chunksize=1)), + ignore_index=True, + ) + col = df.DateColWithTz + tm.assert_series_equal(col, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_table(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + result = sql.read_sql_table("datetz", conn) + tm.assert_frame_equal(result, expected.to_frame()) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_with_timezone_roundtrip(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + # GH 
9086 + # Write datetimetz data to a db and read it back + # For dbs that support timestamps with timezones, should get back UTC + # otherwise naive data should be returned + expected = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")} + ) + assert expected.to_sql(name="test_datetime_tz", con=conn, index=False) == 3 + + if "postgresql" in conn_name: + # SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC + expected["A"] = expected["A"].dt.tz_convert("UTC") + else: + # Otherwise, timestamps are returned as local, naive + expected["A"] = expected["A"].dt.tz_localize(None) + + result = sql.read_sql_table("test_datetime_tz", conn) + tm.assert_frame_equal(result, expected) + + result = sql.read_sql_query("SELECT * FROM test_datetime_tz", conn) + if "sqlite" in conn_name: + # read_sql_query does not return datetime type like read_sql_table + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_out_of_bounds_datetime(conn, request): + # GH 26761 + conn = request.getfixturevalue(conn) + data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0]) + assert data.to_sql(name="test_datetime_obb", con=conn, index=False) == 1 + result = sql.read_sql_table("test_datetime_obb", conn) + expected = DataFrame([pd.NaT], columns=["date"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_naive_datetimeindex_roundtrip(conn, request): + # GH 23510 + # Ensure that a naive DatetimeIndex isn't converted to UTC + conn = request.getfixturevalue(conn) + dates = date_range("2018-01-01", periods=5, freq="6h")._with_freq(None) + expected = DataFrame({"nums": range(5)}, index=dates) + assert expected.to_sql(name="foo_table", con=conn, index_label="info_date") == 5 + result = sql.read_sql_table("foo_table", conn, index_col="info_date") + # result index with gain a name from a set_index operation; expected + tm.assert_frame_equal(result, expected, check_names=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_date_parsing(conn, request): + # No Parsing + conn_name = conn + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + expected_type = object if "sqlite" in conn_name else np.datetime64 + assert issubclass(df.DateCol.dtype.type, expected_type) + + df = sql.read_sql_table("types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table( + "types", + conn, + parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": "s"}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": {"unit": "s"}}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B": 
np.arange(3.0)} + ) + assert df.to_sql(name="test_datetime", con=conn) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + result = result.drop("index", axis=1) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = sql.read_sql_query("SELECT * FROM test_datetime", conn) + result = result.drop("index", axis=1) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_NaT(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} + ) + df.loc[1, "A"] = np.nan + assert df.to_sql(name="test_datetime", con=conn, index=False) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = sql.read_sql_query("SELECT * FROM test_datetime", conn) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"], errors="coerce") + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_date(conn, request): + # test support for datetime.date + conn = request.getfixturevalue(conn) + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_table("test_date", conn) + result = res["a"] + expected = to_datetime(df["a"]) + # comes back as datetime64 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_time(conn, request, sqlite_buildin): + # test support for datetime.time + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"]) + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_table("test_time", conn) + tm.assert_frame_equal(res, df) + + # GH8341 + # first, use the fallback to have the sqlite adapter put in place + sqlite_conn = sqlite_buildin + assert sql.to_sql(df, "test_time2", sqlite_conn, index=False) == 2 + res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) # check if adapter is in place + # then test if sqlalchemy is unaffected by the sqlite adapter + assert sql.to_sql(df, "test_time3", conn, index=False) == 2 + if "sqlite" in conn_name: + res = sql.read_sql_query("SELECT * FROM test_time3", conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) + res = sql.read_sql_table("test_time3", conn) + tm.assert_frame_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_mixed_dtype_insert(conn, request): + # see GH6509 + conn = request.getfixturevalue(conn) + s1 = Series(2**25 + 1, dtype=np.int32) + s2 = Series(0.0, dtype=np.float32) + df = DataFrame({"s1": s1, "s2": s2}) + + # write and read again + assert df.to_sql(name="test_read_write", con=conn, index=False) == 1 + df2 = sql.read_sql_table("test_read_write", conn) + + 
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_numeric(conn, request): + # NaNs in numeric float column + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_fullcolumn(conn, request): + # full NaN column (numeric float column) + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> not type info from table -> stays None + df["B"] = df["B"].astype("object") + df["B"] = None + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_string(conn, request): + # NaNs in string column + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # NaNs are coming back as None + df.loc[2, "B"] = None + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_save_index(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="ADBC implementation does not create index", strict=True + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame.from_records( + [(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"] + ) + + tbl_name = "test_to_sql_saves_index" + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(df, tbl_name) == 2 + + if conn_name in {"sqlite_buildin", "sqlite_str"}: + ixs = sql.read_sql_query( + "SELECT * FROM sqlite_master WHERE type = 'index' " + f"AND tbl_name = '{tbl_name}'", + conn, + ) + ix_cols = [] + for ix_name in ixs.name: + ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", conn) + ix_cols.append(ix_info.name.tolist()) + else: + from sqlalchemy import inspect + + insp = inspect(conn) + + ixs = insp.get_indexes(tbl_name) + ix_cols = [i["column_names"] for i in ixs] + + assert ix_cols == [["A"]] + + +@pytest.mark.parametrize("conn", all_connectable) +def test_transactions(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + if conn_name != "sqlite_buildin" and "adbc" not in conn_name: + from sqlalchemy import text + + stmt = text(stmt) + + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction() as trans: + trans.execute(stmt) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_transaction_rollback(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with 
pandasSQL.run_transaction() as trans: + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + if "adbc" in conn_name or isinstance(pandasSQL, SQLiteDatabase): + trans.execute(stmt) + else: + from sqlalchemy import text + + stmt = text(stmt) + trans.execute(stmt) + + class DummyException(Exception): + pass + + # Make sure when transaction is rolled back, no rows get inserted + ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')" + if isinstance(pandasSQL, SQLDatabase): + from sqlalchemy import text + + ins_sql = text(ins_sql) + try: + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + raise DummyException("error") + except DummyException: + # ignore raised exception + pass + with pandasSQL.run_transaction(): + res = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res) == 0 + + # Make sure when transaction is committed, rows do get inserted + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + res2 = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res2) == 1 + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_get_schema_create_table(conn, request, test_frame3): + # Use a dataframe without a bool column, since MySQL converts bool to + # TINYINT (which read_sql_table returns as an int and causes a dtype + # mismatch) + if conn == "sqlite_str": + request.applymarker( + pytest.mark.xfail(reason="test does not support sqlite_str fixture") + ) + + conn = request.getfixturevalue(conn) + + from sqlalchemy import text + from sqlalchemy.engine import Engine + + tbl = "test_get_schema_create_table" + create_sql = sql.get_schema(test_frame3, tbl, con=conn) + blank_test_df = test_frame3.iloc[:0] + + create_sql = text(create_sql) + if isinstance(conn, Engine): + with conn.connect() as newcon: + with newcon.begin(): + newcon.execute(create_sql) + else: + conn.execute(create_sql) + returned_df = sql.read_sql_table(tbl, conn) + tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + TEXT, + String, + ) + from sqlalchemy.schema import MetaData + + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": TEXT}) == 2 + meta = MetaData() + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test2"].columns["B"].type + assert isinstance(sqltype, TEXT) + msg = "The type of B is not a SQLAlchemy type" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": str}) + + # GH9083 + assert df.to_sql(name="dtype_test3", con=conn, dtype={"B": String(10)}) == 2 + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test3"].columns["B"].type + assert isinstance(sqltype, String) + assert sqltype.length == 10 + + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype=TEXT) == 2 + meta.reflect(bind=conn) + sqltypea = meta.tables["single_dtype_test"].columns["A"].type + sqltypeb = meta.tables["single_dtype_test"].columns["B"].type + assert isinstance(sqltypea, TEXT) + assert isinstance(sqltypeb, TEXT) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_notna_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection 
system") + + conn_name = conn + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Boolean, + DateTime, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) + + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 + _ = sql.read_sql_table(tbl, conn) + meta = MetaData() + meta.reflect(bind=conn) + my_type = Integer if "mysql" in conn_name else Boolean + col_dict = meta.tables[tbl].columns + assert isinstance(col_dict["Bool"].type, my_type) + assert isinstance(col_dict["Date"].type, DateTime) + assert isinstance(col_dict["Int"].type, Integer) + assert isinstance(col_dict["Float"].type, Float) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_double_precision(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + BigInteger, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + V = 1.23456789101112131415 + + df = DataFrame( + { + "f32": Series([V], dtype="float32"), + "f64": Series([V], dtype="float64"), + "f64_as_f32": Series([V], dtype="float64"), + "i32": Series([5], dtype="int32"), + "i64": Series([5], dtype="int64"), + } + ) + + assert ( + df.to_sql( + name="test_dtypes", + con=conn, + index=False, + if_exists="replace", + dtype={"f64_as_f32": Float(precision=23)}, + ) + == 1 + ) + res = sql.read_sql_table("test_dtypes", conn) + + # check precision of float64 + assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14) + + # check sql types + meta = MetaData() + meta.reflect(bind=conn) + col_dict = meta.tables["test_dtypes"].columns + assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type) + assert isinstance(col_dict["f32"].type, Float) + assert isinstance(col_dict["f64"].type, Float) + assert isinstance(col_dict["i32"].type, Integer) + assert isinstance(col_dict["i64"].type, BigInteger) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_connectable_issue_example(conn, request): + conn = request.getfixturevalue(conn) + + # This tests the example raised in issue + # https://github.com/pandas-dev/pandas/issues/10104 + from sqlalchemy.engine import Engine + + def test_select(connection): + query = "SELECT test_foo_data FROM test_foo_data" + return sql.read_sql_query(query, con=connection) + + def test_append(connection, data): + data.to_sql(name="test_foo_data", con=connection, if_exists="append") + + def test_connectable(conn): + # https://github.com/sqlalchemy/sqlalchemy/commit/ + # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973 + foo_data = test_select(conn) + test_append(conn, foo_data) + + def main(connectable): + if isinstance(connectable, Engine): + with connectable.connect() as conn: + with conn.begin(): + test_connectable(conn) + else: + test_connectable(connectable) + + assert ( + DataFrame({"test_foo_data": [0, 1, 2]}).to_sql(name="test_foo_data", con=conn) + == 3 + ) + main(conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "input", + [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], +) +def test_to_sql_with_negative_npinf(conn, request, input): + # GH 34431 + + df = DataFrame(input) + conn_name = conn + conn = request.getfixturevalue(conn) + + if "mysql" in 
conn_name: + # GH 36465 + # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error + # for pymysql version >= 0.10 + # TODO(GH#36465): remove this version check after GH 36465 is fixed + pymysql = pytest.importorskip("pymysql") + + if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns: + mark = pytest.mark.xfail(reason="GH 36465") + request.applymarker(mark) + + msg = "inf cannot be used with MySQL" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="foobar", con=conn, index=False) + else: + assert df.to_sql(name="foobar", con=conn, index=False) == 1 + res = sql.read_sql_table("foobar", conn) + tm.assert_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_temporary_table(conn, request): + if conn == "sqlite_str": + pytest.skip("test does not work with str connection") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Column, + Integer, + Unicode, + select, + ) + from sqlalchemy.orm import ( + Session, + declarative_base, + ) + + test_data = "Hello, World!" + expected = DataFrame({"spam": [test_data]}) + Base = declarative_base() + + class Temporary(Base): + __tablename__ = "temp_test" + __table_args__ = {"prefixes": ["TEMPORARY"]} + id = Column(Integer, primary_key=True) + spam = Column(Unicode(30), nullable=False) + + with Session(conn) as session: + with session.begin(): + conn = session.connection() + Temporary.__table__.create(conn) + session.add(Temporary(spam=test_data)) + session.flush() + df = sql.read_sql_query(sql=select(Temporary.spam), con=conn) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_invalid_engine(conn, request, test_frame1): + if conn == "sqlite_buildin" or "adbc" in conn: + request.applymarker( + pytest.mark.xfail( + reason="SQLiteDatabase/ADBCDatabase does not raise for bad engine" + ) + ) + + conn = request.getfixturevalue(conn) + msg = "engine must be one of 'auto', 'sqlalchemy'" + with pandasSQL_builder(conn) as pandasSQL: + with pytest.raises(ValueError, match=msg): + pandasSQL.to_sql(test_frame1, "test_frame1", engine="bad_engine") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_with_sql_engine(conn, request, test_frame1): + """`to_sql` with the `engine` param""" + # mostly copied from this class's `_to_sql()` method + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1", engine="auto") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_options_sqlalchemy(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + with pd.option_context("io.sql.engine", "sqlalchemy"): + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_options_auto(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + with pd.option_context("io.sql.engine", "auto"): + with pandasSQL_builder(conn) as pandasSQL: + with 
pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +def test_options_get_engine(): + pytest.importorskip("sqlalchemy") + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "sqlalchemy"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "auto"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. are installed but bad version + pass + # TODO(GH#36893) fill this in when we add more engines + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype_backend( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)( + f"Select * from {table}", conn, dtype_backend=dtype_backend + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + if "adbc" in conn_name: + # adbc does not support chunksize argument + request.applymarker( + pytest.mark.xfail(reason="adbc does not support chunksize argument") + ) + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + f"Select * from {table}", + con=conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table"]) +def test_read_sql_dtype_backend_table( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + if "sqlite" in conn and "adbc" not in conn: + request.applymarker( + pytest.mark.xfail( + reason=( + "SQLite actually returns proper boolean values via " + "read_sql_table, but before pytest refactor was skipped" + ) + ) + ) + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)(table, conn, dtype_backend=dtype_backend) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + if "adbc" in conn_name: + # adbc does not support chunksize argument + return + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + table, + conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, 
expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table", "read_sql_query"]) +def test_read_sql_invalid_dtype_backend_table(conn, request, func, dtype_backend_data): + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + getattr(pd, func)(table, conn, dtype_backend="numpy") + + +@pytest.fixture +def dtype_backend_data() -> DataFrame: + return DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + +@pytest.fixture +def dtype_backend_expected(): + def func(storage, dtype_backend, conn_name) -> DataFrame: + string_array: StringArray | ArrowStringArray + string_array_na: StringArray | ArrowStringArray + if storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) # type: ignore[assignment] + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) # type: ignore[assignment] + + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + df = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, pd.NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + from pandas.arrays import ArrowExtensionArray + + df = DataFrame( + { + col: ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + + if "mysql" in conn_name or "sqlite" in conn_name: + if dtype_backend == "numpy_nullable": + df = df.astype({"e": "Int64", "f": "Int64"}) + else: + df = df.astype({"e": "int64[pyarrow]", "f": "int64[pyarrow]"}) + + return df + + return func + + +@pytest.mark.parametrize("conn", all_connectable) +def test_chunksize_empty_dtypes(conn, request): + # GH#50245 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + dtypes = {"a": "int64", "b": "object"} + df = DataFrame(columns=["a", "b"]).astype(dtypes) + expected = df.copy() + df.to_sql(name="test", con=conn, index=False, if_exists="replace") + + for result in read_sql_query( + "SELECT * FROM test", + conn, + dtype=dtypes, + chunksize=1, + ): + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("dtype_backend", [lib.no_default, "numpy_nullable"]) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype(conn, 
request, func, dtype_backend): + # GH#50797 + conn = request.getfixturevalue(conn) + table = "test" + df = DataFrame({"a": [1, 2, 3], "b": 5}) + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + result = getattr(pd, func)( + f"Select * from {table}", + conn, + dtype={"a": np.float64}, + dtype_backend=dtype_backend, + ) + expected = DataFrame( + { + "a": Series([1, 2, 3], dtype=np.float64), + "b": Series( + [5, 5, 5], + dtype="int64" if not dtype_backend == "numpy_nullable" else "Int64", + ), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_keyword_deprecation(sqlite_engine): + conn = sqlite_engine + # GH 54397 + msg = ( + "Starting with pandas version 3.0 all arguments of to_sql except for the " + "arguments 'name' and 'con' will be keyword-only." + ) + df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) + df.to_sql("example", conn) + + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_sql("example", conn, None, if_exists="replace") + + +def test_bigint_warning(sqlite_engine): + conn = sqlite_engine + # test no warning for BIGINT (to support int64) is raised (GH7433) + df = DataFrame({"a": [1, 2]}, dtype="int64") + assert df.to_sql(name="test_bigintwarning", con=conn, index=False) == 2 + + with tm.assert_produces_warning(None): + sql.read_sql_table("test_bigintwarning", conn) + + +def test_valueerror_exception(sqlite_engine): + conn = sqlite_engine + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) + with pytest.raises(ValueError, match="Empty table name specified"): + df.to_sql(name="", con=conn, if_exists="replace", index=False) + + +def test_row_object_is_named_tuple(sqlite_engine): + conn = sqlite_engine + # GH 40682 + # Test for the is_named_tuple() function + # Placed here due to its usage of sqlalchemy + + from sqlalchemy import ( + Column, + Integer, + String, + ) + from sqlalchemy.orm import ( + declarative_base, + sessionmaker, + ) + + BaseModel = declarative_base() + + class Test(BaseModel): + __tablename__ = "test_frame" + id = Column(Integer, primary_key=True) + string_column = Column(String(50)) + + with conn.begin(): + BaseModel.metadata.create_all(conn) + Session = sessionmaker(bind=conn) + with Session() as session: + df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]}) + assert ( + df.to_sql(name="test_frame", con=conn, index=False, if_exists="replace") + == 2 + ) + session.commit() + test_query = session.query(Test.id, Test.string_column) + df = DataFrame(test_query) + + assert list(df.columns) == ["id", "string_column"] + + +def test_read_sql_string_inference(sqlite_engine): + conn = sqlite_engine + # GH#54430 + pytest.importorskip("pyarrow") + table = "test" + df = DataFrame({"a": ["x", "y"]}) + df.to_sql(table, con=conn, index=False, if_exists="replace") + + with pd.option_context("future.infer_string", True): + result = read_sql_table(table, conn) + + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["x", "y"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + + tm.assert_frame_equal(result, expected) + + +def test_roundtripping_datetimes(sqlite_engine): + conn = sqlite_engine + # GH#54877 + df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") + df.to_sql("test", conn, if_exists="replace", index=False) + result = pd.read_sql("select * from test", conn).iloc[0, 0] + assert result == "2020-12-31 12:00:00.000000" + + +@pytest.fixture +def sqlite_builtin_detect_types(): + with contextlib.closing( + sqlite3.connect(":memory:", 
detect_types=sqlite3.PARSE_DECLTYPES) + ) as closing_conn: + with closing_conn as conn: + yield conn + + +def test_roundtripping_datetimes_detect_types(sqlite_builtin_detect_types): + # https://github.com/pandas-dev/pandas/issues/55554 + conn = sqlite_builtin_detect_types + df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") + df.to_sql("test", conn, if_exists="replace", index=False) + result = pd.read_sql("select * from test", conn).iloc[0, 0] + assert result == Timestamp("2020-12-31 12:00:00.000000") + + +@pytest.mark.db +def test_psycopg2_schema_support(postgresql_psycopg2_engine): + conn = postgresql_psycopg2_engine + + # only test this for postgresql (schema's not supported in + # mysql/sqlite) + df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) + + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") + + # write dataframe to different schema's + assert df.to_sql(name="test_schema_public", con=conn, index=False) == 2 + assert ( + df.to_sql( + name="test_schema_public_explicit", + con=conn, + index=False, + schema="public", + ) + == 2 + ) + assert ( + df.to_sql(name="test_schema_other", con=conn, index=False, schema="other") == 2 + ) + + # read dataframes back in + res1 = sql.read_sql_table("test_schema_public", conn) + tm.assert_frame_equal(df, res1) + res2 = sql.read_sql_table("test_schema_public_explicit", conn) + tm.assert_frame_equal(df, res2) + res3 = sql.read_sql_table("test_schema_public_explicit", conn, schema="public") + tm.assert_frame_equal(df, res3) + res4 = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(df, res4) + msg = "Table test_schema_other not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("test_schema_other", conn, schema="public") + + # different if_exists options + + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") + + # write dataframe with different if_exists options + assert ( + df.to_sql(name="test_schema_other", con=conn, schema="other", index=False) == 2 + ) + df.to_sql( + name="test_schema_other", + con=conn, + schema="other", + index=False, + if_exists="replace", + ) + assert ( + df.to_sql( + name="test_schema_other", + con=conn, + schema="other", + index=False, + if_exists="append", + ) + == 2 + ) + res = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(concat([df, df], ignore_index=True), res) + + +@pytest.mark.db +def test_self_join_date_columns(postgresql_psycopg2_engine): + # GH 44421 + conn = postgresql_psycopg2_engine + from sqlalchemy.sql import text + + create_table = text( + """ + CREATE TABLE person + ( + id serial constraint person_pkey primary key, + created_dt timestamp with time zone + ); + + INSERT INTO person + VALUES (1, '2021-01-01T00:00:00Z'); + """ + ) + with conn.connect() as con: + with con.begin(): + con.execute(create_table) + + sql_query = ( + 'SELECT * FROM "person" AS p1 INNER JOIN "person" AS p2 ON p1.id = p2.id;' + ) + result = pd.read_sql(sql_query, conn) + expected = DataFrame( + [[1, Timestamp("2021", tz="UTC")] * 2], columns=["id", "created_dt"] * 2 + ) + tm.assert_frame_equal(result, expected) + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("person") + + +def 
test_create_and_drop_table(sqlite_engine): + conn = sqlite_engine + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(temp_frame, "drop_test_frame") == 4 + + assert pandasSQL.has_table("drop_test_frame") + + with pandasSQL.run_transaction(): + pandasSQL.drop_table("drop_test_frame") + + assert not pandasSQL.has_table("drop_test_frame") + + +def test_sqlite_datetime_date(sqlite_buildin): + conn = sqlite_buildin + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_date", conn) + # comes back as strings + tm.assert_frame_equal(res, df.astype(str)) + + +@pytest.mark.parametrize("tz_aware", [False, True]) +def test_sqlite_datetime_time(tz_aware, sqlite_buildin): + conn = sqlite_buildin + # test support for datetime.time, GH #8341 + if not tz_aware: + tz_times = [time(9, 0, 0), time(9, 1, 30)] + else: + tz_dt = date_range("2013-01-01 09:00:00", periods=2, tz="US/Pacific") + tz_times = Series(tz_dt.to_pydatetime()).map(lambda dt: dt.timetz()) + + df = DataFrame(tz_times, columns=["a"]) + + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_time", conn) + # comes back as strings + expected = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(res, expected) + + +def get_sqlite_column_type(conn, table, column): + recs = conn.execute(f"PRAGMA table_info({table})") + for cid, name, ctype, not_null, default, pk in recs: + if name == column: + return ctype + raise ValueError(f"Table {table}, column {column} not found") + + +def test_sqlite_test_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": "STRING"}) == 2 + + # sqlite stores Boolean values as INTEGER + assert get_sqlite_column_type(conn, "dtype_test", "B") == "INTEGER" + + assert get_sqlite_column_type(conn, "dtype_test2", "B") == "STRING" + msg = r"B \(<class 'bool'>\) not a string" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": bool}) + + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype="STRING") == 2 + assert get_sqlite_column_type(conn, "single_dtype_test", "A") == "STRING" + assert get_sqlite_column_type(conn, "single_dtype_test", "B") == "STRING" + + +def test_sqlite_notna_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) + + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 + + assert get_sqlite_column_type(conn, tbl, "Bool") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Date") == "TIMESTAMP" + assert get_sqlite_column_type(conn, tbl, "Int") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Float") == "REAL" + + +def test_sqlite_illegal_names(sqlite_buildin): + # For sqlite, these should work fine + conn = sqlite_buildin + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + + msg = "Empty table or column name specified" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="", con=conn) + + for ndx, weird_name in
enumerate( + [ + "test_weird_name]", + "test_weird_name[", + "test_weird_name`", + 'test_weird_name"', + "test_weird_name'", + "_b.test_weird_name_01-30", + '"_b.test_weird_name_01-30"', + "99beginswithnumber", + "12345", + "\xe9", + ] + ): + assert df.to_sql(name=weird_name, con=conn) == 2 + sql.table_exists(weird_name, conn) + + df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) + c_tbl = f"test_weird_col_name{ndx:d}" + assert df2.to_sql(name=c_tbl, con=conn) == 2 + sql.table_exists(c_tbl, conn) + + +def format_query(sql, *args): + _formatters = { + datetime: "'{}'".format, + str: "'{}'".format, + np.str_: "'{}'".format, + bytes: "'{}'".format, + float: "{:.8f}".format, + int: "{:d}".format, + type(None): lambda x: "NULL", + np.float64: "{:.10f}".format, + bool: "'{!s}'".format, + } + processed_args = [] + for arg in args: + if isinstance(arg, float) and isna(arg): + arg = None + + formatter = _formatters[type(arg)] + processed_args.append(formatter(arg)) + + return sql % tuple(processed_args) + + +def tquery(query, con=None): + """Replace removed sql.tquery function""" + with sql.pandasSQL_builder(con) as pandas_sql: + res = pandas_sql.execute(query).fetchall() + return None if res is None else list(res) + + +def test_xsqlite_basic(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 10 + result = sql.read_sql("select * from test_table", sqlite_buildin) + + # HACK! Change this once indexes are handled properly. + result.index = frame.index + + expected = frame + tm.assert_frame_equal(result, frame) + + frame["txt"] = ["a"] * len(frame) + frame2 = frame.copy() + new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10 + frame2["Idx"] = new_idx.copy() + assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 10 + result = sql.read_sql("select * from test_table2", sqlite_buildin, index_col="Idx") + expected = frame.copy() + expected.index = new_idx + expected.index.name = "Idx" + tm.assert_frame_equal(expected, result) + + +def test_xsqlite_write_row_by_row(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + frame.iloc[0, 0] = np.nan + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + for _, row in frame.iterrows(): + fmt_sql = format_query(ins, *row) + tquery(fmt_sql, con=sqlite_buildin) + + sqlite_buildin.commit() + + result = sql.read_sql("select * from test", con=sqlite_buildin) + result.index = frame.index + tm.assert_frame_equal(result, frame, rtol=1e-3) + + +def test_xsqlite_execute(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + ins = "INSERT INTO test VALUES (?, ?, ?, ?)" + + row = frame.iloc[0] + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute(ins, tuple(row)) + sqlite_buildin.commit() + + result = sql.read_sql("select * from test", sqlite_buildin) + result.index = frame.index[:1] + 
tm.assert_frame_equal(result, frame[:1]) + + +def test_xsqlite_schema(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + create_sql = sql.get_schema(frame, "test") + lines = create_sql.splitlines() + for line in lines: + tokens = line.split(" ") + if len(tokens) == 2 and tokens[0] == "A": + assert tokens[1] == "DATETIME" + + create_sql = sql.get_schema(frame, "test", keys=["A", "B"]) + lines = create_sql.splitlines() + assert 'PRIMARY KEY ("A", "B")' in create_sql + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + +def test_xsqlite_execute_fail(sqlite_buildin): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') + pandas_sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)') + + with pytest.raises(sql.DatabaseError, match="Execution failed on sql"): + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 7)') + + +def test_xsqlite_execute_closed_connection(): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + with contextlib.closing(sqlite3.connect(":memory:")) as conn: + cur = conn.cursor() + cur.execute(create_sql) + + with sql.pandasSQL_builder(conn) as pandas_sql: + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') + + msg = "Cannot operate on a closed database." + with pytest.raises(sqlite3.ProgrammingError, match=msg): + tquery("select * from test", con=conn) + + +def test_xsqlite_keyword_as_column_names(sqlite_buildin): + df = DataFrame({"From": np.ones(5)}) + assert sql.to_sql(df, con=sqlite_buildin, name="testkeywords", index=False) == 5 + + +def test_xsqlite_onecolumn_of_integer(sqlite_buildin): + # GH 3628 + # a column_of_integers dataframe should transfer well to sql + + mono_df = DataFrame([1, 2], columns=["c0"]) + assert sql.to_sql(mono_df, con=sqlite_buildin, name="mono_df", index=False) == 2 + # computing the sum via sql + con_x = sqlite_buildin + the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df")) + # it should not fail, and gives 3 ( Issue #3628 ) + assert the_sum == 3 + + result = sql.read_sql("select * from mono_df", con_x) + tm.assert_frame_equal(result, mono_df) + + +def test_xsqlite_if_exists(sqlite_buildin): + df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) + df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) + table_name = "table_if_exists" + sql_select = f"SELECT * FROM {table_name}" + + msg = "'notvalidvalue' is not valid for if_exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="notvalidvalue", + ) + drop_table(table_name, sqlite_buildin) + + # test if_exists='fail' + sql.to_sql( + frame=df_if_exists_1, con=sqlite_buildin, name=table_name, if_exists="fail" + ) + msg = "Table 'table_if_exists' already exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="fail", + ) + # test if_exists='replace' + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="replace", + index=False, + ) + assert tquery(sql_select, 
con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, + if_exists="replace", + index=False, + ) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(3, "C"), (4, "D"), (5, "E")] + drop_table(table_name, sqlite_buildin) + + # test if_exists='append' + assert ( + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="fail", + index=False, + ) + == 2 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, + if_exists="append", + index=False, + ) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [ + (1, "A"), + (2, "B"), + (3, "C"), + (4, "D"), + (5, "E"), + ] + drop_table(table_name, sqlite_buildin) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/test_stata.py b/venv/lib/python3.10/site-packages/pandas/tests/io/test_stata.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd74faa8a3dbbae94f2a8fd79aa23bf677e6220 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/test_stata.py @@ -0,0 +1,2381 @@ +import bz2 +import datetime as dt +from datetime import datetime +import gzip +import io +import os +import struct +import tarfile +import zipfile + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import CategoricalDtype +import pandas._testing as tm +from pandas.core.frame import ( + DataFrame, + Series, +) + +from pandas.io.parsers import read_csv +from pandas.io.stata import ( + CategoricalConversionWarning, + InvalidColumnName, + PossiblePrecisionLoss, + StataMissingValue, + StataReader, + StataWriter, + StataWriterUTF8, + ValueLabelTypeMismatch, + read_stata, +) + + +@pytest.fixture +def mixed_frame(): + return DataFrame( + { + "a": [1, 2, 3, 4], + "b": [1.0, 3.0, 27.0, 81.0], + "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"], + } + ) + + +@pytest.fixture +def parsed_114(datapath): + dta14_114 = datapath("io", "data", "stata", "stata5_114.dta") + parsed_114 = read_stata(dta14_114, convert_dates=True) + parsed_114.index.name = "index" + return parsed_114 + + +class TestStata: + def read_dta(self, file): + # Legacy default reader configuration + return read_stata(file, convert_dates=True) + + def read_csv(self, file): + return read_csv(file, parse_dates=True) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_empty_dta(self, version): + empty_ds = DataFrame(columns=["unit"]) + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + empty_ds.to_stata(path, write_index=False, version=version) + empty_ds2 = read_stata(path) + tm.assert_frame_equal(empty_ds, empty_ds2) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_empty_dta_with_dtypes(self, version): + # GH 46240 + # Fixing above bug revealed that types are not correctly preserved when + # writing empty DataFrames + empty_df_typed = DataFrame( + { + "i8": np.array([0], dtype=np.int8), + "i16": np.array([0], dtype=np.int16), + "i32": np.array([0], dtype=np.int32), + "i64": np.array([0], dtype=np.int64), + "u8": np.array([0], dtype=np.uint8), + "u16": np.array([0], dtype=np.uint16), + "u32": np.array([0], dtype=np.uint32), + "u64": np.array([0], dtype=np.uint64), + "f32": np.array([0], dtype=np.float32), + "f64": np.array([0], dtype=np.float64), + } + ) + expected = 
empty_df_typed.copy() + # No uint# support. Downcast since values in range for int# + expected["u8"] = expected["u8"].astype(np.int8) + expected["u16"] = expected["u16"].astype(np.int16) + expected["u32"] = expected["u32"].astype(np.int32) + # No int64 supported at all. Downcast since values in range for int32 + expected["u64"] = expected["u64"].astype(np.int32) + expected["i64"] = expected["i64"].astype(np.int32) + + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + empty_df_typed.to_stata(path, write_index=False, version=version) + empty_reread = read_stata(path) + tm.assert_frame_equal(expected, empty_reread) + tm.assert_series_equal(expected.dtypes, empty_reread.dtypes) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_index_col_none(self, version): + df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]}) + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=version) + read_df = read_stata(path) + + assert isinstance(read_df.index, pd.RangeIndex) + expected = df.copy() + expected["a"] = expected["a"].astype(np.int32) + tm.assert_frame_equal(read_df, expected, check_index_type=True) + + @pytest.mark.parametrize("file", ["stata1_114", "stata1_117"]) + def test_read_dta1(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + # Pandas uses np.nan as missing value. + # Thus, all columns will be of type float, regardless of their name. + expected = DataFrame( + [(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], + ) + + # this is an oddity as really the nan should be float64, but + # the casting doesn't fail so need to match stata here + expected["float_miss"] = expected["float_miss"].astype(np.float32) + + tm.assert_frame_equal(parsed, expected) + + def test_read_dta2(self, datapath): + expected = DataFrame.from_records( + [ + ( + datetime(2006, 11, 19, 23, 13, 20), + 1479596223000, + datetime(2010, 1, 20), + datetime(2010, 1, 8), + datetime(2010, 1, 1), + datetime(1974, 7, 1), + datetime(2010, 1, 1), + datetime(2010, 1, 1), + ), + ( + datetime(1959, 12, 31, 20, 3, 20), + -1479590, + datetime(1953, 10, 2), + datetime(1948, 6, 10), + datetime(1955, 1, 1), + datetime(1955, 7, 1), + datetime(1955, 1, 1), + datetime(2, 1, 1), + ), + (pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT), + ], + columns=[ + "datetime_c", + "datetime_big_c", + "date", + "weekly_date", + "monthly_date", + "quarterly_date", + "half_yearly_date", + "yearly_date", + ], + ) + expected["yearly_date"] = expected["yearly_date"].astype("O") + + path1 = datapath("io", "data", "stata", "stata2_114.dta") + path2 = datapath("io", "data", "stata", "stata2_115.dta") + path3 = datapath("io", "data", "stata", "stata2_117.dta") + + with tm.assert_produces_warning(UserWarning): + parsed_114 = self.read_dta(path1) + with tm.assert_produces_warning(UserWarning): + parsed_115 = self.read_dta(path2) + with tm.assert_produces_warning(UserWarning): + parsed_117 = self.read_dta(path3) + # FIXME: don't leave commented-out + # 113 is buggy due to limits of date format support in Stata + # parsed_113 = self.read_dta( + # datapath("io", "data", "stata", "stata2_113.dta") + # ) + + # FIXME: don't leave commented-out + # buggy test because of the NaT comparison on certain platforms + # Format 113 test fails since it does not support tc and tC formats + 
# tm.assert_frame_equal(parsed_113, expected) + tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True) + tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True) + tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True) + + @pytest.mark.parametrize( + "file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"] + ) + def test_read_dta3(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + # match stata here + expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) + expected = expected.astype(np.float32) + expected["year"] = expected["year"].astype(np.int16) + expected["quarter"] = expected["quarter"].astype(np.int8) + + tm.assert_frame_equal(parsed, expected) + + @pytest.mark.parametrize( + "file", ["stata4_113", "stata4_114", "stata4_115", "stata4_117"] + ) + def test_read_dta4(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + expected = DataFrame.from_records( + [ + ["one", "ten", "one", "one", "one"], + ["two", "nine", "two", "two", "two"], + ["three", "eight", "three", "three", "three"], + ["four", "seven", 4, "four", "four"], + ["five", "six", 5, np.nan, "five"], + ["six", "five", 6, np.nan, "six"], + ["seven", "four", 7, np.nan, "seven"], + ["eight", "three", 8, np.nan, "eight"], + ["nine", "two", 9, np.nan, "nine"], + ["ten", "one", "ten", np.nan, "ten"], + ], + columns=[ + "fully_labeled", + "fully_labeled2", + "incompletely_labeled", + "labeled_with_missings", + "float_labelled", + ], + ) + + # these are all categoricals + for col in expected: + orig = expected[col].copy() + + categories = np.asarray(expected["fully_labeled"][orig.notna()]) + if col == "incompletely_labeled": + categories = orig + + cat = orig.astype("category")._values + cat = cat.set_categories(categories, ordered=True) + cat.categories.rename(None, inplace=True) + + expected[col] = cat + + # stata doesn't save .category metadata + tm.assert_frame_equal(parsed, expected) + + # File containing strls + def test_read_dta12(self, datapath): + parsed_117 = self.read_dta(datapath("io", "data", "stata", "stata12_117.dta")) + expected = DataFrame.from_records( + [ + [1, "abc", "abcdefghi"], + [3, "cba", "qwertywertyqwerty"], + [93, "", "strl"], + ], + columns=["x", "y", "z"], + ) + + tm.assert_frame_equal(parsed_117, expected, check_dtype=False) + + def test_read_dta18(self, datapath): + parsed_118 = self.read_dta(datapath("io", "data", "stata", "stata14_118.dta")) + parsed_118["Bytes"] = parsed_118["Bytes"].astype("O") + expected = DataFrame.from_records( + [ + ["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0], + ["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan], + ["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0], + ["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4], # noqa: RUF001 + ["", "", "", 0, 0.3332999, "option a", 1 / 3.0], + ], + columns=[ + "Things", + "Cities", + "Unicode_Cities_Strl", + "Ints", + "Floats", + "Bytes", + "Longs", + ], + ) + expected["Floats"] = expected["Floats"].astype(np.float32) + for col in parsed_118.columns: + tm.assert_almost_equal(parsed_118[col], expected[col]) + + with StataReader(datapath("io", "data", "stata", "stata14_118.dta")) as rdr: + vl = rdr.variable_labels() + vl_expected = { + "Unicode_Cities_Strl": "Here are some strls with Ünicode chars", + "Longs": "long data", + "Things": "Here are some things", + "Bytes": "byte data", + "Ints": "int data", + 
"Cities": "Here are some cities", + "Floats": "float data", + } + tm.assert_dict_equal(vl, vl_expected) + + assert rdr.data_label == "This is a Ünicode data label" + + def test_read_write_dta5(self): + original = DataFrame( + [(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], + ) + original.index.name = "index" + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=None) + written_and_read_again = self.read_dta(path) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_write_dta6(self, datapath): + original = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) + original.index.name = "index" + original.index = original.index.astype(np.int32) + original["year"] = original["year"].astype(np.int32) + original["quarter"] = original["quarter"].astype(np.int32) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=None) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_write_dta10(self, version): + original = DataFrame( + data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]], + columns=["string", "object", "integer", "floating", "datetime"], + ) + original["object"] = Series(original["object"], dtype=object) + original.index.name = "index" + original.index = original.index.astype(np.int32) + original["integer"] = original["integer"].astype(np.int32) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates={"datetime": "tc"}, version=version) + written_and_read_again = self.read_dta(path) + # original.index is np.int32, read index is np.int64 + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + + def test_stata_doc_examples(self): + with tm.ensure_clean() as path: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.to_stata(path) + + def test_write_preserves_original(self): + # 9795 + + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") + ) + df.loc[2, "a":"c"] = np.nan + df_copy = df.copy() + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False) + tm.assert_frame_equal(df, df_copy) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_encoding(self, version, datapath): + # GH 4626, proper encoding handling + raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) + encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) + result = encoded.kreis1849[0] + + expected = raw.kreis1849[0] + assert result == expected + assert isinstance(result, str) + + with tm.ensure_clean() as path: + encoded.to_stata(path, write_index=False, version=version) + reread_encoded = read_stata(path) + tm.assert_frame_equal(encoded, reread_encoded) + + def test_read_write_dta11(self): + original = DataFrame( + [(1, 2, 3, 4)], + columns=[ + "good", + "b\u00E4d", + "8number", + "astringwithmorethan32characters______", + ], + ) + formatted = DataFrame( + [(1, 2, 3, 4)], + columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"], + ) + formatted.index.name = "index" + formatted = formatted.astype(np.int32) + + with tm.ensure_clean() 
as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates=None) + + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_write_dta12(self, version): + original = DataFrame( + [(1, 2, 3, 4, 5, 6)], + columns=[ + "astringwithmorethan32characters_1", + "astringwithmorethan32characters_2", + "+", + "-", + "short", + "delete", + ], + ) + formatted = DataFrame( + [(1, 2, 3, 4, 5, 6)], + columns=[ + "astringwithmorethan32characters_", + "_0astringwithmorethan32character", + "_", + "_1_", + "_short", + "_delete", + ], + ) + formatted.index.name = "index" + formatted = formatted.astype(np.int32) + + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates=None, version=version) + # should get a warning for that format. + + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_read_write_dta13(self): + s1 = Series(2**9, dtype=np.int16) + s2 = Series(2**17, dtype=np.int32) + s3 = Series(2**33, dtype=np.int64) + original = DataFrame({"int16": s1, "int32": s2, "int64": s3}) + original.index.name = "index" + + formatted = original + formatted["int64"] = formatted["int64"].astype(np.float64) + + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + @pytest.mark.parametrize( + "file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"] + ) + def test_read_write_reread_dta14(self, file, parsed_114, version, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + parsed.index.name = "index" + + tm.assert_frame_equal(parsed_114, parsed) + + with tm.ensure_clean() as path: + parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version) + written_and_read_again = self.read_dta(path) + + expected = parsed_114.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize( + "file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"] + ) + def test_read_write_reread_dta15(self, file, datapath): + expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv")) + expected["byte_"] = expected["byte_"].astype(np.int8) + expected["int_"] = expected["int_"].astype(np.int16) + expected["long_"] = expected["long_"].astype(np.int32) + expected["float_"] = expected["float_"].astype(np.float32) + expected["double_"] = expected["double_"].astype(np.float64) + expected["date_td"] = expected["date_td"].apply( + datetime.strptime, args=("%Y-%m-%d",) + ) + + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + tm.assert_frame_equal(expected, parsed) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_timestamp_and_label(self, version): + original = DataFrame([(1,)], columns=["variable"]) + time_stamp = 
datetime(2000, 2, 29, 14, 21) + data_label = "This is a data file." + with tm.ensure_clean() as path: + original.to_stata( + path, time_stamp=time_stamp, data_label=data_label, version=version + ) + + with StataReader(path) as reader: + assert reader.time_stamp == "29 Feb 2000 14:21" + assert reader.data_label == data_label + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_timestamp(self, version): + original = DataFrame([(1,)], columns=["variable"]) + time_stamp = "01 Jan 2000, 00:00:00" + with tm.ensure_clean() as path: + msg = "time_stamp should be datetime type" + with pytest.raises(ValueError, match=msg): + original.to_stata(path, time_stamp=time_stamp, version=version) + assert not os.path.isfile(path) + + def test_numeric_column_names(self): + original = DataFrame(np.reshape(np.arange(25.0), (5, 5))) + original.index.name = "index" + with tm.ensure_clean() as path: + # should get a warning for that format. + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path) + + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + columns = list(written_and_read_again.columns) + convert_col_name = lambda x: int(x[1]) + written_and_read_again.columns = map(convert_col_name, columns) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(expected, written_and_read_again) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_nan_to_missing_value(self, version): + s1 = Series(np.arange(4.0), dtype=np.float32) + s2 = Series(np.arange(4.0), dtype=np.float64) + s1[::2] = np.nan + s2[1::2] = np.nan + original = DataFrame({"s1": s1, "s2": s2}) + original.index.name = "index" + + with tm.ensure_clean() as path: + original.to_stata(path, version=version) + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again, expected) + + def test_no_index(self): + columns = ["x", "y"] + original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns) + original.index.name = "index_not_written" + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + written_and_read_again = self.read_dta(path) + with pytest.raises(KeyError, match=original.index.name): + written_and_read_again["index_not_written"] + + def test_string_no_dates(self): + s1 = Series(["a", "A longer string"]) + s2 = Series([1.0, 2.0], dtype=np.float64) + original = DataFrame({"s1": s1, "s2": s2}) + original.index.name = "index" + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_large_value_conversion(self): + s0 = Series([1, 99], dtype=np.int8) + s1 = Series([1, 127], dtype=np.int8) + s2 = Series([1, 2**15 - 1], dtype=np.int16) + s3 = Series([1, 2**63 - 1], dtype=np.int64) + original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3}) + original.index.name = "index" + with tm.ensure_clean() as path: + with tm.assert_produces_warning(PossiblePrecisionLoss): + original.to_stata(path) + + written_and_read_again = self.read_dta(path) + + modified = original.copy() + modified["s1"] = Series(modified["s1"], dtype=np.int16) 
+ modified["s2"] = Series(modified["s2"], dtype=np.int32) + modified["s3"] = Series(modified["s3"], dtype=np.float64) + modified.index = original.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) + + def test_dates_invalid_column(self): + original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) + original.index.name = "index" + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates={0: "tc"}) + + written_and_read_again = self.read_dta(path) + + modified = original.copy() + modified.columns = ["_0"] + modified.index = original.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) + + def test_105(self, datapath): + # Data obtained from: + # http://go.worldbank.org/ZXY29PVJ21 + dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") + df = read_stata(dpath) + df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]] + df0 = DataFrame(df0) + df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"] + df0["clustnum"] = df0["clustnum"].astype(np.int16) + df0["pri_schl"] = df0["pri_schl"].astype(np.int8) + df0["psch_num"] = df0["psch_num"].astype(np.int8) + df0["psch_dis"] = df0["psch_dis"].astype(np.float32) + tm.assert_frame_equal(df.head(3), df0) + + def test_value_labels_old_format(self, datapath): + # GH 19417 + # + # Test that value_labels() returns an empty dict if the file format + # predates supporting value labels. + dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") + with StataReader(dpath) as reader: + assert reader.value_labels() == {} + + def test_date_export_formats(self): + columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"] + conversions = {c: c for c in columns} + data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns) + original = DataFrame([data], columns=columns) + original.index.name = "index" + expected_values = [ + datetime(2006, 11, 20, 23, 13, 20), # Time + datetime(2006, 11, 20), # Day + datetime(2006, 11, 19), # Week + datetime(2006, 11, 1), # Month + datetime(2006, 10, 1), # Quarter year + datetime(2006, 7, 1), # Half year + datetime(2006, 1, 1), + ] # Year + + expected = DataFrame( + [expected_values], + index=pd.Index([0], dtype=np.int32, name="index"), + columns=columns, + ) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=conversions) + written_and_read_again = self.read_dta(path) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_write_missing_strings(self): + original = DataFrame([["1"], [None]], columns=["foo"]) + + expected = DataFrame( + [["1"], [""]], + index=pd.Index([0, 1], dtype=np.int32, name="index"), + columns=["foo"], + ) + + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + @pytest.mark.parametrize("byteorder", [">", "<"]) + def test_bool_uint(self, byteorder, version): + s0 = Series([0, 1, True], dtype=np.bool_) + s1 = Series([0, 1, 100], dtype=np.uint8) + s2 = Series([0, 1, 255], dtype=np.uint8) + s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16) + s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16) + s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32) + s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32) + + original = DataFrame( + {"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6} + ) + 
original.index.name = "index" + expected = original.copy() + expected.index = original.index.astype(np.int32) + expected_types = ( + np.int8, + np.int8, + np.int16, + np.int16, + np.int32, + np.int32, + np.float64, + ) + for c, t in zip(expected.columns, expected_types): + expected[c] = expected[c].astype(t) + + with tm.ensure_clean() as path: + original.to_stata(path, byteorder=byteorder, version=version) + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + tm.assert_frame_equal(written_and_read_again, expected) + + def test_variable_labels(self, datapath): + with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr: + sr_115 = rdr.variable_labels() + with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr: + sr_117 = rdr.variable_labels() + keys = ("var1", "var2", "var3") + labels = ("label1", "label2", "label3") + for k, v in sr_115.items(): + assert k in sr_117 + assert v == sr_117[k] + assert k in keys + assert v in labels + + def test_minimal_size_col(self): + str_lens = (1, 100, 244) + s = {} + for str_len in str_lens: + s["s" + str(str_len)] = Series( + ["a" * str_len, "b" * str_len, "c" * str_len] + ) + original = DataFrame(s) + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + + with StataReader(path) as sr: + sr._ensure_open() # The `_*list` variables are initialized here + for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist): + assert int(variable[1:]) == int(fmt[1:-1]) + assert int(variable[1:]) == typ + + def test_excessively_long_string(self): + str_lens = (1, 244, 500) + s = {} + for str_len in str_lens: + s["s" + str(str_len)] = Series( + ["a" * str_len, "b" * str_len, "c" * str_len] + ) + original = DataFrame(s) + msg = ( + r"Fixed width strings in Stata \.dta files are limited to 244 " + r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy " + r"this restriction\. Use the\n'version=117' parameter to write " + r"the newer \(Stata 13 and later\) format\." + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_missing_value_generator(self): + types = ("b", "h", "l") + df = DataFrame([[0.0]], columns=["float_"]) + with tm.ensure_clean() as path: + df.to_stata(path) + with StataReader(path) as rdr: + valid_range = rdr.VALID_RANGE + expected_values = ["." + chr(97 + i) for i in range(26)] + expected_values.insert(0, ".") + for t in types: + offset = valid_range[t][1] + for i in range(27): + val = StataMissingValue(offset + 1 + i) + assert val.string == expected_values[i] + + # Test extremes for floats + val = StataMissingValue(struct.unpack(" DataFrame: + """ + Emulate the categorical casting behavior we expect from roundtripping. 
+ """ + for col in from_frame: + ser = from_frame[col] + if isinstance(ser.dtype, CategoricalDtype): + cat = ser._values.remove_unused_categories() + if cat.categories.dtype == object: + categories = pd.Index._with_infer(cat.categories._values) + cat = cat.set_categories(categories) + from_frame[col] = cat + return from_frame + + def test_iterator(self, datapath): + fname = datapath("io", "data", "stata", "stata3_117.dta") + + parsed = read_stata(fname) + + with read_stata(fname, iterator=True) as itr: + chunk = itr.read(5) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + with read_stata(fname, chunksize=5) as itr: + chunk = list(itr) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0]) + + with read_stata(fname, iterator=True) as itr: + chunk = itr.get_chunk(5) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + with read_stata(fname, chunksize=5) as itr: + chunk = itr.get_chunk() + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + # GH12153 + with read_stata(fname, chunksize=4) as itr: + from_chunks = pd.concat(itr) + tm.assert_frame_equal(parsed, from_chunks) + + @pytest.mark.filterwarnings("ignore::UserWarning") + @pytest.mark.parametrize( + "file", + [ + "stata2_115", + "stata3_115", + "stata4_115", + "stata5_115", + "stata6_115", + "stata7_115", + "stata8_115", + "stata9_115", + "stata10_115", + "stata11_115", + ], + ) + @pytest.mark.parametrize("chunksize", [1, 2]) + @pytest.mark.parametrize("convert_categoricals", [False, True]) + @pytest.mark.parametrize("convert_dates", [False, True]) + def test_read_chunks_115( + self, file, chunksize, convert_categoricals, convert_dates, datapath + ): + fname = datapath("io", "data", "stata", f"{file}.dta") + + # Read the whole file + parsed = read_stata( + fname, + convert_categoricals=convert_categoricals, + convert_dates=convert_dates, + ) + + # Compare to what we get when reading by chunk + with read_stata( + fname, + iterator=True, + convert_dates=convert_dates, + convert_categoricals=convert_categoricals, + ) as itr: + pos = 0 + for j in range(5): + try: + chunk = itr.read(chunksize) + except StopIteration: + break + from_frame = parsed.iloc[pos : pos + chunksize, :].copy() + from_frame = self._convert_categorical(from_frame) + tm.assert_frame_equal( + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True + ) + pos += chunksize + + def test_read_chunks_columns(self, datapath): + fname = datapath("io", "data", "stata", "stata3_117.dta") + columns = ["quarter", "cpi", "m1"] + chunksize = 2 + + parsed = read_stata(fname, columns=columns) + with read_stata(fname, iterator=True) as itr: + pos = 0 + for j in range(5): + chunk = itr.read(chunksize, columns=columns) + if chunk is None: + break + from_frame = parsed.iloc[pos : pos + chunksize, :] + tm.assert_frame_equal(from_frame, chunk, check_dtype=False) + pos += chunksize + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_write_variable_labels(self, version, mixed_frame): + # GH 13631, add support for writing variable labels + mixed_frame.index.name = "index" + variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"} + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) + with StataReader(path) as sr: + read_labels = sr.variable_labels() + expected_labels = { + "index": "", + "a": "City Rank", + "b": "City Exponent", + "c": "City", + } + assert read_labels == expected_labels + + variable_labels["index"] = "The Index" + with tm.ensure_clean() as path: + 
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) + with StataReader(path) as sr: + read_labels = sr.variable_labels() + assert read_labels == variable_labels + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_variable_labels(self, version, mixed_frame): + mixed_frame.index.name = "index" + variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} + with tm.ensure_clean() as path: + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): + mixed_frame.to_stata( + path, variable_labels=variable_labels, version=version + ) + + @pytest.mark.parametrize("version", [114, 117]) + def test_invalid_variable_label_encoding(self, version, mixed_frame): + mixed_frame.index.name = "index" + variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} + variable_labels["a"] = "invalid character Œ" + with tm.ensure_clean() as path: + with pytest.raises( + ValueError, match="Variable labels must contain only characters" + ): + mixed_frame.to_stata( + path, variable_labels=variable_labels, version=version + ) + + def test_write_variable_label_errors(self, mixed_frame): + values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"] + + variable_labels_utf8 = { + "a": "City Rank", + "b": "City Exponent", + "c": "".join(values), + } + + msg = ( + "Variable labels must contain only characters that can be " + "encoded in Latin-1" + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels_utf8) + + variable_labels_long = { + "a": "City Rank", + "b": "City Exponent", + "c": "A very, very, very long variable label " + "that is too long for Stata which means " + "that it has more than 80 characters", + } + + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels_long) + + def test_default_date_conversion(self): + # GH 12259 + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + reread = read_stata(path, convert_dates=True) + tm.assert_frame_equal(original, reread) + + original.to_stata(path, write_index=False, convert_dates={"dates": "tc"}) + direct = read_stata(path, convert_dates=True) + tm.assert_frame_equal(reread, direct) + + dates_idx = original.columns.tolist().index("dates") + original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"}) + direct = read_stata(path, convert_dates=True) + tm.assert_frame_equal(reread, direct) + + def test_unsupported_type(self): + original = DataFrame({"a": [1 + 2j, 2 + 4j]}) + + msg = "Data type complex128 not supported" + with pytest.raises(NotImplementedError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_unsupported_datetype(self): + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + msg = "Format %tC not implemented" + with 
pytest.raises(NotImplementedError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates={"dates": "tC"}) + + dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong") + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + with pytest.raises(NotImplementedError, match="Data type datetime64"): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_repeated_column_labels(self, datapath): + # GH 13923, 25772 + msg = """ +Value labels for column ethnicsn are not unique. These cannot be converted to +pandas categoricals. + +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. + +The repeated labels are:\n-+\nwolof +""" + with pytest.raises(ValueError, match=msg): + read_stata( + datapath("io", "data", "stata", "stata15.dta"), + convert_categoricals=True, + ) + + def test_stata_111(self, datapath): + # 111 is an old version but still used by current versions of + # SAS when exporting to Stata format. We do not know of any + # on-line documentation for this version. + df = read_stata(datapath("io", "data", "stata", "stata7_111.dta")) + original = DataFrame( + { + "y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0], + "x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6], + "w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3], + "z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"], + } + ) + original = original[["y", "x", "w", "z"]] + tm.assert_frame_equal(original, df) + + def test_out_of_range_double(self): + # GH 14618 + df = DataFrame( + { + "ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307], + "ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max], + } + ) + msg = ( + r"Column ColumnTooBig has a maximum value \(.+\) outside the range " + r"supported by Stata \(.+\)" + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + df.to_stata(path) + + def test_out_of_range_float(self): + original = DataFrame( + { + "ColumnOk": [ + 0.0, + np.finfo(np.float32).eps, + np.finfo(np.float32).max / 10.0, + ], + "ColumnTooBig": [ + 0.0, + np.finfo(np.float32).eps, + np.finfo(np.float32).max, + ], + } + ) + original.index.name = "index" + for col in original: + original[col] = original[col].astype(np.float32) + + with tm.ensure_clean() as path: + original.to_stata(path) + reread = read_stata(path) + + original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64) + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(reread.set_index("index"), expected) + + @pytest.mark.parametrize("infval", [np.inf, -np.inf]) + def test_inf(self, infval): + # GH 45350 + df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]}) + msg = ( + "Column WithInf contains infinity or -infinity" + "which is outside the range supported by Stata." 
+ ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + df.to_stata(path) + + def test_path_pathlib(self): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + reader = lambda x: read_stata(x).set_index("index") + result = tm.round_trip_pathlib(df.to_stata, reader) + tm.assert_frame_equal(df, result) + + def test_pickle_path_localpath(self): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + reader = lambda x: read_stata(x).set_index("index") + result = tm.round_trip_localpath(df.to_stata, reader) + tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("write_index", [True, False]) + def test_value_labels_iterator(self, write_index): + # GH 16923 + d = {"A": ["B", "E", "C", "A", "E"]} + df = DataFrame(data=d) + df["A"] = df["A"].astype("category") + with tm.ensure_clean() as path: + df.to_stata(path, write_index=write_index) + + with read_stata(path, iterator=True) as dta_iter: + value_labels = dta_iter.value_labels() + assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}} + + def test_set_index(self): + # GH 17328 + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + with tm.ensure_clean() as path: + df.to_stata(path) + reread = read_stata(path, index_col="index") + tm.assert_frame_equal(df, reread) + + @pytest.mark.parametrize( + "column", ["ms", "day", "week", "month", "qtr", "half", "yr"] + ) + def test_date_parsing_ignores_format_details(self, column, datapath): + # GH 17797 + # + # Test that display formats are ignored when determining if a numeric + # column is a date value. + # + # All date types are stored as numbers and format associated with the + # column denotes both the type of the date and the display format. + # + # STATA supports 9 date types which each have distinct units. We test 7 + # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that + # accounts for leap seconds and %tb relies on STATAs business calendar. 
+ df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta")) + unformatted = df.loc[0, column] + formatted = df.loc[0, column + "_fmt"] + assert unformatted == formatted + + def test_writer_117(self): + original = DataFrame( + data=[ + [ + "string", + "object", + 1, + 1, + 1, + 1.1, + 1.1, + np.datetime64("2003-12-25"), + "a", + "a" * 2045, + "a" * 5000, + "a", + ], + [ + "string-1", + "object-1", + 1, + 1, + 1, + 1.1, + 1.1, + np.datetime64("2003-12-26"), + "b", + "b" * 2045, + "", + "", + ], + ], + columns=[ + "string", + "object", + "int8", + "int16", + "int32", + "float32", + "float64", + "datetime", + "s1", + "s2045", + "srtl", + "forced_strl", + ], + ) + original["object"] = Series(original["object"], dtype=object) + original["int8"] = Series(original["int8"], dtype=np.int8) + original["int16"] = Series(original["int16"], dtype=np.int16) + original["int32"] = original["int32"].astype(np.int32) + original["float32"] = Series(original["float32"], dtype=np.float32) + original.index.name = "index" + original.index = original.index.astype(np.int32) + copy = original.copy() + with tm.ensure_clean() as path: + original.to_stata( + path, + convert_dates={"datetime": "tc"}, + convert_strl=["forced_strl"], + version=117, + ) + written_and_read_again = self.read_dta(path) + # original.index is np.int32, read index is np.int64 + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + tm.assert_frame_equal(original, copy) + + def test_convert_strl_name_swap(self): + original = DataFrame( + [["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]], + columns=["long1" * 10, "long", 1], + ) + original.index.name = "index" + + with tm.assert_produces_warning(InvalidColumnName): + with tm.ensure_clean() as path: + original.to_stata(path, convert_strl=["long", 1], version=117) + reread = self.read_dta(path) + reread = reread.set_index("index") + reread.columns = original.columns + tm.assert_frame_equal(reread, original, check_index_type=False) + + def test_invalid_date_conversion(self): + # GH 12259 + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + with tm.ensure_clean() as path: + msg = "convert_dates key must be a column or an integer" + with pytest.raises(ValueError, match=msg): + original.to_stata(path, convert_dates={"wrong_name": "tc"}) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_nonfile_writing(self, version): + # GH 21041 + bio = io.BytesIO() + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + with tm.ensure_clean() as path: + df.to_stata(bio, version=version) + bio.seek(0) + with open(path, "wb") as dta: + dta.write(bio.read()) + reread = read_stata(path, index_col="index") + tm.assert_frame_equal(df, reread) + + def test_gzip_writing(self): + # writing version 117 requires seek and cannot be used with gzip + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + with tm.ensure_clean() as path: + with gzip.GzipFile(path, "wb") as gz: + df.to_stata(gz, version=114) + with 
gzip.GzipFile(path, "rb") as gz: + reread = read_stata(gz, index_col="index") + tm.assert_frame_equal(df, reread) + + def test_unicode_dta_118(self, datapath): + unicode_df = self.read_dta(datapath("io", "data", "stata", "stata16_118.dta")) + + columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"] + values = [ + ["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"], + ["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"], + ["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"], + [" ", " ", "d", " ", "d"], + [" ", "", "a", " ", "a"], + ["", "", "s", "", "s"], + ["", "", " ", "", " "], + ] + expected = DataFrame(values, columns=columns) + + tm.assert_frame_equal(unicode_df, expected) + + def test_mixed_string_strl(self): + # GH 23633 + output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}] + output = DataFrame(output) + output.number = output.number.astype("int32") + + with tm.ensure_clean() as path: + output.to_stata(path, write_index=False, version=117) + reread = read_stata(path) + expected = output.fillna("") + tm.assert_frame_equal(reread, expected) + + # Check strl supports all None (null) + output["mixed"] = None + output.to_stata( + path, write_index=False, convert_strl=["mixed"], version=117 + ) + reread = read_stata(path) + expected = output.fillna("") + tm.assert_frame_equal(reread, expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_all_none_exception(self, version): + output = [{"none": "none", "number": 0}, {"none": None, "number": 1}] + output = DataFrame(output) + output["none"] = None + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="Column `none` cannot be exported"): + output.to_stata(path, version=version) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_file_not_written(self, version): + content = "Here is one __�__ Another one __·__ Another one __½__" + df = DataFrame([content], columns=["invalid"]) + with tm.ensure_clean() as path: + msg1 = ( + r"'latin-1' codec can't encode character '\\ufffd' " + r"in position 14: ordinal not in range\(256\)" + ) + msg2 = ( + "'ascii' codec can't decode byte 0xef in position 14: " + r"ordinal not in range\(128\)" + ) + with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"): + df.to_stata(path) + + def test_strl_latin1(self): + # GH 23573, correct GSO data to reflect correct size + output = DataFrame( + [["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"] + ) + + with tm.ensure_clean() as path: + output.to_stata(path, version=117, convert_strl=["var_strl"]) + with open(path, "rb") as reread: + content = reread.read() + expected = "þâÑÐŧ" + assert expected.encode("latin-1") in content + assert expected.encode("utf-8") in content + gsos = content.split(b"strls")[1][1:-2] + for gso in gsos.split(b"GSO")[1:]: + val = gso.split(b"\x00")[-2] + size = gso[gso.find(b"\x82") + 1] + assert len(val) == size - 1 + + def test_encoding_latin1_118(self, datapath): + # GH 25960 + msg = """ +One or more strings in the dta file could not be decoded using utf-8, and +so the fallback encoding of latin-1 is being used. This can happen when a file +has been incorrectly encoded by Stata or some other software. 
You should verify +the string values returned are correct.""" + # Move path outside of read_stata, or else assert_produces_warning + # will block pytests skip mechanism from triggering (failing the test) + # if the path is not present + path = datapath("io", "data", "stata", "stata1_encoding_118.dta") + with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w: + encoded = read_stata(path) + # with filter_level="always", produces 151 warnings which can be slow + assert len(w) == 1 + assert w[0].message.args[0] == msg + + expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"]) + tm.assert_frame_equal(encoded, expected) + + @pytest.mark.slow + def test_stata_119(self, datapath): + # Gzipped since contains 32,999 variables and uncompressed is 20MiB + # Just validate that the reader reports correct number of variables + # to avoid high peak memory + with gzip.open( + datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb" + ) as gz: + with StataReader(gz) as reader: + reader._ensure_open() + assert reader._nvar == 32999 + + @pytest.mark.parametrize("version", [118, 119, None]) + def test_utf8_writer(self, version): + cat = pd.Categorical(["a", "β", "ĉ"], ordered=True) + data = DataFrame( + [ + [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"], + [2.0, 2, "ᴮ", ""], + [3.0, 3, "ᴰ", None], + ], + columns=["Å", "β", "ĉ", "strls"], + ) + data["ᴐᴬᵀ"] = cat + variable_labels = { + "Å": "apple", + "β": "ᵈᵉᵊ", + "ĉ": "ᴎტჄႲႳႴႶႺ", + "strls": "Long Strings", + "ᴐᴬᵀ": "", + } + data_label = "ᴅaᵀa-label" + value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}} + data["β"] = data["β"].astype(np.int32) + with tm.ensure_clean() as path: + writer = StataWriterUTF8( + path, + data, + data_label=data_label, + convert_strl=["strls"], + variable_labels=variable_labels, + write_index=False, + version=version, + value_labels=value_labels, + ) + writer.write_file() + reread_encoded = read_stata(path) + # Missing is intentionally converted to empty strl + data["strls"] = data["strls"].fillna("") + # Variable with value labels is reread as categorical + data["β"] = ( + data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered() + ) + tm.assert_frame_equal(data, reread_encoded) + with StataReader(path) as reader: + assert reader.data_label == data_label + assert reader.variable_labels() == variable_labels + + data.to_stata(path, version=version, write_index=False) + reread_to_stata = read_stata(path) + tm.assert_frame_equal(data, reread_to_stata) + + def test_writer_118_exceptions(self): + df = DataFrame(np.zeros((1, 33000), dtype=np.int8)) + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="version must be either 118 or 119."): + StataWriterUTF8(path, df, version=117) + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="You must use version 119"): + StataWriterUTF8(path, df, version=118) + + @pytest.mark.parametrize( + "dtype_backend", + ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))], + ) + def test_read_write_ea_dtypes(self, dtype_backend): + df = DataFrame( + { + "a": [1, 2, None], + "b": ["a", "b", "c"], + "c": [True, False, None], + "d": [1.5, 2.5, 3.5], + "e": pd.date_range("2020-12-31", periods=3, freq="D"), + }, + index=pd.Index([0, 1, 2], name="index"), + ) + df = df.convert_dtypes(dtype_backend=dtype_backend) + df.to_stata("test_stata.dta", version=118) + + with tm.ensure_clean() as path: + df.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = DataFrame( + { + "a": 
[1, 2, np.nan], + "b": ["a", "b", "c"], + "c": [1.0, 0, np.nan], + "d": [1.5, 2.5, 3.5], + "e": pd.date_range("2020-12-31", periods=3, freq="D"), + }, + index=pd.Index([0, 1, 2], name="index", dtype=np.int32), + ) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + +@pytest.mark.parametrize("version", [105, 108, 111, 113, 114]) +def test_backward_compat(version, datapath): + data_base = datapath("io", "data", "stata") + ref = os.path.join(data_base, "stata-compat-118.dta") + old = os.path.join(data_base, f"stata-compat-{version}.dta") + expected = read_stata(ref) + old_dta = read_stata(old) + tm.assert_frame_equal(old_dta, expected, check_dtype=False) + + +def test_direct_read(datapath, monkeypatch): + file_path = datapath("io", "data", "stata", "stata-compat-118.dta") + + # Test that opening a file path doesn't buffer the file. + with StataReader(file_path) as reader: + # Must not have been buffered to memory + assert not reader.read().empty + assert not isinstance(reader._path_or_buf, io.BytesIO) + + # Test that we use a given fp exactly, if possible. + with open(file_path, "rb") as fp: + with StataReader(fp) as reader: + assert not reader.read().empty + assert reader._path_or_buf is fp + + # Test that we use a given BytesIO exactly, if possible. + with open(file_path, "rb") as fp: + with io.BytesIO(fp.read()) as bio: + with StataReader(bio) as reader: + assert not reader.read().empty + assert reader._path_or_buf is bio + + +def test_statareader_warns_when_used_without_context(datapath): + file_path = datapath("io", "data", "stata", "stata-compat-118.dta") + with tm.assert_produces_warning( + ResourceWarning, + match="without using a context manager", + ): + sr = StataReader(file_path) + sr.read() + with tm.assert_produces_warning( + FutureWarning, + match="is not part of the public API", + ): + sr.close() + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +@pytest.mark.parametrize("use_dict", [True, False]) +@pytest.mark.parametrize("infer", [True, False]) +def test_compression(compression, version, use_dict, infer, compression_to_extension): + file_name = "dta_inferred_compression.dta" + if compression: + if use_dict: + file_ext = compression + else: + file_ext = compression_to_extension[compression] + file_name += f".{file_ext}" + compression_arg = compression + if infer: + compression_arg = "infer" + if use_dict: + compression_arg = {"method": compression} + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.index.name = "index" + with tm.ensure_clean(file_name) as path: + df.to_stata(path, version=version, compression=compression_arg) + if compression == "gzip": + with gzip.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "zip": + with zipfile.ZipFile(path, "r") as comp: + fp = io.BytesIO(comp.read(comp.filelist[0])) + elif compression == "tar": + with tarfile.open(path) as tar: + fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read()) + elif compression == "bz2": + with bz2.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "zstd": + zstd = pytest.importorskip("zstandard") + with zstd.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "xz": + lzma = pytest.importorskip("lzma") + with lzma.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression is None: + fp = path + reread = read_stata(fp, index_col="index") + + expected = df.copy() + expected.index = 
expected.index.astype(np.int32) + tm.assert_frame_equal(reread, expected) + + +@pytest.mark.parametrize("method", ["zip", "infer"]) +@pytest.mark.parametrize("file_ext", [None, "dta", "zip"]) +def test_compression_dict(method, file_ext): + file_name = f"test.{file_ext}" + archive_name = "test.dta" + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.index.name = "index" + with tm.ensure_clean(file_name) as path: + compression = {"method": method, "archive_name": archive_name} + df.to_stata(path, compression=compression) + if method == "zip" or file_ext == "zip": + with zipfile.ZipFile(path, "r") as zp: + assert len(zp.filelist) == 1 + assert zp.filelist[0].filename == archive_name + fp = io.BytesIO(zp.read(zp.filelist[0])) + else: + fp = path + reread = read_stata(fp, index_col="index") + + expected = df.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(reread, expected) + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +def test_chunked_categorical(version): + df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")}) + df.index.name = "index" + + expected = df.copy() + expected.index = expected.index.astype(np.int32) + + with tm.ensure_clean() as path: + df.to_stata(path, version=version) + with StataReader(path, chunksize=2, order_categoricals=False) as reader: + for i, block in enumerate(reader): + block = block.set_index("index") + assert "cats" in block + tm.assert_series_equal( + block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)] + ) + + +def test_chunked_categorical_partial(datapath): + dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") + values = ["a", "b", "a", "b", 3.0] + with StataReader(dta_file, chunksize=2) as reader: + with tm.assert_produces_warning(CategoricalConversionWarning): + for i, block in enumerate(reader): + assert list(block.cats) == values[2 * i : 2 * (i + 1)] + if i < 2: + idx = pd.Index(["a", "b"]) + else: + idx = pd.Index([3.0], dtype="float64") + tm.assert_index_equal(block.cats.cat.categories, idx) + with tm.assert_produces_warning(CategoricalConversionWarning): + with StataReader(dta_file, chunksize=5) as reader: + large_chunk = reader.__next__() + direct = read_stata(dta_file) + tm.assert_frame_equal(direct, large_chunk) + + +@pytest.mark.parametrize("chunksize", (-1, 0, "apple")) +def test_iterator_errors(datapath, chunksize): + dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") + with pytest.raises(ValueError, match="chunksize must be a positive"): + with StataReader(dta_file, chunksize=chunksize): + pass + + +def test_iterator_value_labels(): + # GH 31544 + values = ["c_label", "b_label"] + ["a_label"] * 500 + df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)}) + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False) + expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object") + with read_stata(path, chunksize=100) as reader: + for j, chunk in enumerate(reader): + for i in range(2): + tm.assert_index_equal(chunk.dtypes.iloc[i].categories, expected) + tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100]) + + +def test_precision_loss(): + df = DataFrame( + [[sum(2**i for i in range(60)), sum(2**i for i in range(52))]], + columns=["big", "little"], + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning( + PossiblePrecisionLoss, match="Column converted from int64 to float64" + ): + df.to_stata(path, 
write_index=False) + reread = read_stata(path) + expected_dt = Series([np.float64, np.float64], index=["big", "little"]) + tm.assert_series_equal(reread.dtypes, expected_dt) + assert reread.loc[0, "little"] == df.loc[0, "little"] + assert reread.loc[0, "big"] == float(df.loc[0, "big"]) + + +def test_compression_roundtrip(compression): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.index.name = "index" + + with tm.ensure_clean() as path: + df.to_stata(path, compression=compression) + reread = read_stata(path, compression=compression, index_col="index") + tm.assert_frame_equal(df, reread) + + # explicitly ensure file was compressed. + with tm.decompress_file(path, compression) as fh: + contents = io.BytesIO(fh.read()) + reread = read_stata(contents, index_col="index") + tm.assert_frame_equal(df, reread) + + +@pytest.mark.parametrize("to_infer", [True, False]) +@pytest.mark.parametrize("read_infer", [True, False]) +def test_stata_compression( + compression_only, read_infer, to_infer, compression_to_extension +): + compression = compression_only + + ext = compression_to_extension[compression] + filename = f"test.{ext}" + + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.index.name = "index" + + to_compression = "infer" if to_infer else compression + read_compression = "infer" if read_infer else compression + + with tm.ensure_clean(filename) as path: + df.to_stata(path, compression=to_compression) + result = read_stata(path, compression=read_compression, index_col="index") + tm.assert_frame_equal(result, df) + + +def test_non_categorical_value_labels(): + data = DataFrame( + { + "fully_labelled": [1, 2, 3, 3, 1], + "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan], + "Y": [7, 7, 9, 8, 10], + "Z": pd.Categorical(["j", "k", "l", "k", "j"]), + } + ) + + with tm.ensure_clean() as path: + value_labels = { + "fully_labelled": {1: "one", 2: "two", 3: "three"}, + "partially_labelled": {1.0: "one", 2.0: "two"}, + } + expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}} + + writer = StataWriter(path, data, value_labels=value_labels) + writer.write_file() + + with StataReader(path) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == expected + + msg = "Can't create value labels for notY, it wasn't found in the dataset." + with pytest.raises(KeyError, match=msg): + value_labels = {"notY": {7: "label1", 8: "label2"}} + StataWriter(path, data, value_labels=value_labels) + + msg = ( + "Can't create value labels for Z, value labels " + "can only be applied to numeric columns." 
+ ) + with pytest.raises(ValueError, match=msg): + value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}} + StataWriter(path, data, value_labels=value_labels) + + +def test_non_categorical_value_label_name_conversion(): + # Check conversion of invalid variable names + data = DataFrame( + { + "invalid~!": [1, 1, 2, 3, 5, 8], # Only alphanumeric and _ + "6_invalid": [1, 1, 2, 3, 5, 8], # Must start with letter or _ + "invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8], # Too long + "aggregate": [2, 5, 5, 6, 6, 9], # Reserved words + (1, 2): [1, 2, 3, 4, 5, 6], # Hashable non-string + } + ) + + value_labels = { + "invalid~!": {1: "label1", 2: "label2"}, + "6_invalid": {1: "label1", 2: "label2"}, + "invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"}, + "aggregate": {5: "five"}, + (1, 2): {3: "three"}, + } + + expected = { + "invalid__": {1: "label1", 2: "label2"}, + "_6_invalid": {1: "label1", 2: "label2"}, + "invalid_name_longer_than_32_char": {8: "eight", 9: "nine"}, + "_aggregate": {5: "five"}, + "_1__2_": {3: "three"}, + } + + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + data.to_stata(path, value_labels=value_labels) + + with StataReader(path) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == expected + + +def test_non_categorical_value_label_convert_categoricals_error(): + # Mapping more than one value to the same label is valid for Stata + # labels, but can't be read with convert_categoricals=True + value_labels = { + "repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"} + } + + data = DataFrame( + { + "repeated_labels": [10, 10, 20, 20, 40, 40], + } + ) + + with tm.ensure_clean() as path: + data.to_stata(path, value_labels=value_labels) + + with StataReader(path, convert_categoricals=False) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == value_labels + + col = "repeated_labels" + repeats = "-" * 80 + "\n" + "\n".join(["More than ten"]) + + msg = f""" +Value labels for column {col} are not unique. These cannot be converted to +pandas categoricals. + +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. 
+ +The repeated labels are: +{repeats} +""" + with pytest.raises(ValueError, match=msg): + read_stata(path, convert_categoricals=True) + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +@pytest.mark.parametrize( + "dtype", + [ + pd.BooleanDtype, + pd.Int8Dtype, + pd.Int16Dtype, + pd.Int32Dtype, + pd.Int64Dtype, + pd.UInt8Dtype, + pd.UInt16Dtype, + pd.UInt32Dtype, + pd.UInt64Dtype, + ], +) +def test_nullable_support(dtype, version): + df = DataFrame( + { + "a": Series([1.0, 2.0, 3.0]), + "b": Series([1, pd.NA, pd.NA], dtype=dtype.name), + "c": Series(["a", "b", None]), + } + ) + dtype_name = df.b.dtype.numpy_dtype.name + # Only use supported names: no uint, bool or int64 + dtype_name = dtype_name.replace("u", "") + if dtype_name == "int64": + dtype_name = "int32" + elif dtype_name == "bool": + dtype_name = "int8" + value = StataMissingValue.BASE_MISSING_VALUES[dtype_name] + smv = StataMissingValue(value) + expected_b = Series([1, smv, smv], dtype=object, name="b") + expected_c = Series(["a", "b", ""], name="c") + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=version) + reread = read_stata(path, convert_missing=True) + tm.assert_series_equal(df.a, reread.a) + tm.assert_series_equal(reread.b, expected_b) + tm.assert_series_equal(reread.c, expected_c) + + +def test_empty_frame(): + # GH 46240 + # create an empty DataFrame with int64 and float64 dtypes + df = DataFrame(data={"a": range(3), "b": [1.0, 2.0, 3.0]}).head(0) + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=117) + # Read entire dataframe + df2 = read_stata(path) + assert "b" in df2 + # Dtypes don't match since no support for int32 + dtypes = Series({"a": np.dtype("int32"), "b": np.dtype("float64")}) + tm.assert_series_equal(df2.dtypes, dtypes) + # read one column of empty .dta file + df3 = read_stata(path, columns=["a"]) + assert "b" not in df3 + tm.assert_series_equal(df3.dtypes, dtypes.loc[["a"]])
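Note: the write/read round-trip pattern exercised throughout the tests above (DataFrame.to_stata, then read_stata, then tm.assert_frame_equal) can be reproduced outside the pandas test suite. The following is a minimal sketch, not part of the diff above: it assumes only pandas and numpy are installed, uses the standard-library tempfile module in place of the internal tm.ensure_clean helper, and the file name example.dta is arbitrary.

# Minimal sketch: round-trip a DataFrame through the Stata .dta format,
# mirroring the pattern used by the tests above. tempfile stands in for
# pandas' internal tm.ensure_clean(); "example.dta" is an arbitrary name.
import os
import tempfile

import numpy as np
import pandas as pd

df = pd.DataFrame(
    1.1 * np.arange(120).reshape((30, 4)),
    columns=pd.Index(list("ABCD"), dtype=object),
    index=pd.Index([f"i-{i}" for i in range(30)], dtype=object, name="index"),
)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "example.dta")
    # write_index defaults to True, so the named index is stored as a column
    df.to_stata(path)
    # restore the index on read, as test_set_index above does
    reread = pd.read_stata(path, index_col="index")
    pd.testing.assert_frame_equal(df, reread)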