diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..f73400dfe689e91c4c2b457c4be1a0a41380fd6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py
@@ -0,0 +1,68 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    Int16Dtype,
+    Int32Dtype,
+    Int64Dtype,
+    UInt8Dtype,
+    UInt16Dtype,
+    UInt32Dtype,
+    UInt64Dtype,
+)
+
+
+@pytest.fixture(
+    params=[
+        Int8Dtype,
+        Int16Dtype,
+        Int32Dtype,
+        Int64Dtype,
+        UInt8Dtype,
+        UInt16Dtype,
+        UInt32Dtype,
+        UInt64Dtype,
+    ]
+)
+def dtype(request):
+    """Parametrized fixture returning integer 'dtype'"""
+    return request.param()
+
+
+@pytest.fixture
+def data(dtype):
+    """
+    Fixture returning 'data' array with valid and missing values according to
+    parametrized integer 'dtype'.
+
+    Used to test dtype conversion with and without missing values.
+    """
+    return pd.array(
+        list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100],
+        dtype=dtype,
+    )
+
+
+@pytest.fixture
+def data_missing(dtype):
+    """
+    Fixture returning array with exactly one NaN and one valid integer,
+    according to parametrized integer 'dtype'.
+
+    Used to test dtype conversion with and without missing values.
+ """ + return pd.array([np.nan, 1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' integer arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..8acd298f37a0795851c4c37bceff5a8222f521ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py @@ -0,0 +1,385 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])], + ids=["add", "mul"], +) +def test_add_mul(dtype, opname, exp): + a = pd.array([0, 1, None, 3, 4], dtype=dtype) + b = pd.array([1, 2, 3, None, 5], dtype=dtype) + + # array / array + expected = pd.array(exp, dtype=dtype) + + op = getattr(operator, opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + op = getattr(ops, "r" + opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + +def test_sub(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a - b + expected = pd.array([1, 1, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_div(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a / b + expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(zero, negative): + # https://github.com/pandas-dev/pandas/issues/27398, GH#22793 + a = pd.array([0, 1, -1, None], dtype="Int64") + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"), + np.array([False, False, False, True]), + ) + if negative: + expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a // b + # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) + expected = pd.array([0, 2, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv_by_int_zero_no_mask(any_int_ea_dtype): + # GH 48223: Aligns with non-masked floordiv + # but differs from numpy + # https://github.com/pandas-dev/pandas/issues/30188#issuecomment-564452740 + ser = pd.Series([0, 1], dtype=any_int_ea_dtype) + result = 1 // ser + expected = pd.Series([np.inf, 1.0], dtype="Float64") + tm.assert_series_equal(result, expected) + + ser_non_nullable = ser.astype(ser.dtype.numpy_dtype) + result = 1 // ser_non_nullable + expected = expected.astype(np.float64) + tm.assert_series_equal(result, expected) + + 
+def test_mod(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a % b + expected = pd.array([0, 0, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(): + a = pd.array([-1, 0, 1, None, 2], dtype="Int64") + result = a**0 + expected = pd.array([1, 1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**1 + expected = pd.array([-1, 0, 1, None, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**pd.NA + expected = pd.array([None, None, 1, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**np.nan + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"), + np.array([False, False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0**a + expected = pd.array([1, 0, None, 0], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = 1**a + expected = pd.array([1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = pd.NA**a + expected = pd.array([1, None, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = np.nan**a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype="float64"), + np.array([False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None]) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None]) + result = a**b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None]) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Int64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_numpy_zero_dim_ndarray(other): + arr = pd.array([1, None, 2]) + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string): + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + if using_infer_string: + import pyarrow as pa + + errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError) + else: + errs = TypeError + + # invalid scalars + msg = "|".join( + [ + r"can only perform ops with numeric values", + r"IntegerArray cannot perform the operation mod", + r"unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "ufunc '.*' not supported for the input types, and the inputs could not", + "ufunc '.*' did not contain a loop with signature matching types", + "Addition/subtraction of integers and integer-arrays with Timestamp", + "has no kernel", + "not implemented", + "The 'out' kwarg is necessary. 
Use numpy.strings.multiply without it.", + ] + ) + with pytest.raises(errs, match=msg): + ops("foo") + with pytest.raises(errs, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + str_ser = pd.Series("foo", index=s.index) + # with pytest.raises(TypeError, match=msg): + if ( + all_arithmetic_operators + in [ + "__mul__", + "__rmul__", + ] + and not using_infer_string + ): # (data[~data.isna()] >= 0).all(): + res = ops(str_ser) + expected = pd.Series(["foo" * x for x in data], index=s.index) + expected = expected.fillna(np.nan) + # TODO: doing this fillna to keep tests passing as we make + # assert_almost_equal stricter, but the expected with pd.NA seems + # more-correct than np.nan here. + tm.assert_series_equal(res, expected) + else: + with pytest.raises(errs, match=msg): + ops(str_ser) + + msg = "|".join( + [ + "can only perform ops with numeric values", + "cannot perform .* with this index type: DatetimeArray", + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. *", + "unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "cannot subtract DatetimeArray from ndarray", + "has no kernel", + "not implemented", + ] + ) + with pytest.raises(errs, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +# TODO test unsigned overflow + + +def test_arith_coerce_scalar(data, all_arithmetic_operators): + op = tm.get_op_from_name(all_arithmetic_operators) + s = pd.Series(data) + other = 0.01 + + result = op(s, other) + expected = op(s.astype(float), other) + expected = expected.astype("Float64") + + # rmod results in NaN that wasn't NA in original nullable Series -> unmask it + if all_arithmetic_operators == "__rmod__": + mask = (s == 0).fillna(False).to_numpy(bool) + expected.array._mask[mask] = False + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("other", [1.0, np.array(1.0)]) +def test_arithmetic_conversion(all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if that is equal to an integer + op = tm.get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype="Int64") + result = op(s, other) + assert result.dtype == "Float64" + + +def test_cross_type_arithmetic(): + df = pd.DataFrame( + { + "A": pd.Series([1, 2, np.nan], dtype="Int64"), + "B": pd.Series([1, np.nan, 3], dtype="UInt8"), + "C": [1, 2, 3], + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("op", ["mean"]) +def test_reduce_to_float(op): + # some reduce ops always return float, even if the result + # is a rounded number + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + + # op + result = getattr(df.C, op)() + assert isinstance(result, float) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + 
tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1, 2, 3], [-1, -2, -3], [1, 2, 3]), + ([1, 2, None], [-1, -2, None], [1, 2, None]), + ([-1, 0, 1], [1, 0, -1], [1, 0, 1]), + ], +) +def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_target): + dtype = any_signed_int_ea_dtype + arr = pd.array(source, dtype=dtype) + neg_result, pos_result, abs_result = -arr, +arr, abs(arr) + neg_target = pd.array(neg_target, dtype=dtype) + abs_target = pd.array(abs_target, dtype=dtype) + + tm.assert_extension_array_equal(neg_result, neg_target) + tm.assert_extension_array_equal(pos_result, arr) + assert not tm.shares_memory(pos_result, arr) + tm.assert_extension_array_equal(abs_result, abs_target) + + +def test_values_multiplying_large_series_by_NA(): + # GH#33701 + + result = pd.NA * pd.Series(np.zeros(10001)) + expected = pd.Series([pd.NA] * 10001) + + tm.assert_series_equal(result, expected) + + +def test_bitwise(dtype): + left = pd.array([1, None, 3, 4], dtype=dtype) + right = pd.array([None, 3, 5, 4], dtype=dtype) + + result = left | right + expected = pd.array([None, None, 3 | 5, 4 | 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left & right + expected = pd.array([None, None, 3 & 5, 4 & 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left ^ right + expected = pd.array([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + # TODO: desired behavior when operating with boolean? defer? + + floats = right.astype("Float64") + with pytest.raises(TypeError, match="unsupported operand type"): + left | floats + with pytest.raises(TypeError, match="unsupported operand type"): + left & floats + with pytest.raises(TypeError, match="unsupported operand type"): + left ^ floats diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..568b0b087bf1db9610960dba12ea2e0bab8f1729 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py @@ -0,0 +1,39 @@ +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) + + +class TestComparisonOps(NumericOps, ComparisonOps): + @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_compare_to_int(self, dtype, comparison_op): + # GH 28930 + op_name = f"__{comparison_op.__name__}__" + s1 = pd.Series([1, None, 3], dtype=dtype) + s2 = pd.Series([1, None, 3], dtype="float") + + method = getattr(s1, op_name) + result = method(2) + + method = getattr(s2, op_name) + expected = method(2).astype("boolean") + expected[s2.isna()] = pd.NA + + tm.assert_series_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Int64") + a2 = pd.array([1, 2, None], dtype="Int32") + assert a1.equals(a2) is False diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..feba574da548fd597c25103f67821145bccec9ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py @@ -0,0 +1,69 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "Int64"], "Int64"), + (["UInt64", "UInt64"], "UInt64"), + (["Int8", "Int8"], "Int8"), + (["Int8", "Int16"], "Int16"), + (["UInt8", "Int8"], "Int16"), + (["Int32", "UInt32"], "Int64"), + (["Int64", "UInt64"], "Float64"), + (["Int64", "boolean"], "object"), + (["UInt8", "boolean"], "object"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. + + result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat( + [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]] + ) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "int64"], "Int64"), + (["UInt64", "uint64"], "UInt64"), + (["Int8", "int8"], "Int8"), + (["Int8", "int16"], "Int16"), + (["UInt8", "int8"], "Int16"), + (["Int32", "uint32"], "Int64"), + (["Int64", "uint64"], "Float64"), + (["Int64", "bool"], "object"), + (["UInt8", "bool"], "object"), + ], +) +def test_concat_series_with_numpy(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. + + s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0]) + s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1])) + result = pd.concat([s1, s2], ignore_index=True) + expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat([s2, s1], ignore_index=True) + expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py new file mode 100644 index 0000000000000000000000000000000000000000..64fe40e53a9d287b6240a908b7ed9d0cf7ec1396 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py @@ -0,0 +1,245 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_integer +from pandas.core.arrays import IntegerArray +from pandas.core.arrays.integer import ( + Int8Dtype, + Int32Dtype, + Int64Dtype, +) + + +@pytest.fixture(params=[pd.array, IntegerArray._from_sequence]) +def constructor(request): + """Fixture returning parametrized IntegerArray from given sequence. + + Used to test dtype conversions. 
+ """ + return request.param + + +def test_uses_pandas_na(): + a = pd.array([1, None], dtype=Int64Dtype()) + assert a[1] is pd.NA + + +def test_from_dtype_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype(dtype.type)) + result = pd.Series(dropped, dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + +def test_conversions(data_missing): + # astype to object series + df = pd.DataFrame({"A": data_missing}) + result = df["A"].astype("object") + expected = pd.Series(np.array([pd.NA, 1], dtype=object), name="A") + tm.assert_series_equal(result, expected) + + # convert to object ndarray + # we assert that we are exactly equal + # including type conversions of scalars + result = df["A"].astype("object").values + expected = np.array([pd.NA, 1], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + for r, e in zip(result, expected): + if pd.isnull(r): + assert pd.isnull(e) + elif is_integer(r): + assert r == e + assert is_integer(e) + else: + assert r == e + assert type(r) == type(e) + + +def test_integer_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + msg = r".* should be .* numpy array. 
Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + IntegerArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values.astype(float), mask) + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + IntegerArray(values) + + +def test_integer_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = IntegerArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, np.nan]), + ([None], [np.nan]), + ([None, np.nan], [np.nan, np.nan]), + ([np.nan, np.nan], [np.nan, np.nan]), + ], +) +def test_to_integer_array_none_is_nan(a, b): + result = pd.array(a, dtype="Int64") + expected = pd.array(b, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + ], +) +def test_to_integer_array_error(values): + # error in converting existing arrays to IntegerArrays + msg = "|".join( + [ + r"cannot be converted to IntegerDtype", + r"invalid literal for int\(\) with base 10:", + r"values must be a 1D list-like", + r"Cannot pass scalar", + r"int\(\) argument must be a string", + ] + ) + with pytest.raises((ValueError, TypeError), match=msg): + pd.array(values, dtype="Int64") + + with pytest.raises((ValueError, TypeError), match=msg): + IntegerArray._from_sequence(values) + + +def test_to_integer_array_inferred_dtype(constructor): + # if values has dtype -> respect it + result = constructor(np.array([1, 2], dtype="int8")) + assert result.dtype == Int8Dtype() + result = constructor(np.array([1, 2], dtype="int32")) + assert result.dtype == Int32Dtype() + + # if values have no dtype -> always int64 + result = constructor([1, 2]) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_dtype_keyword(constructor): + result = constructor([1, 2], dtype="Int8") + assert result.dtype == Int8Dtype() + + # if values has dtype -> override it + result = constructor(np.array([1, 2], dtype="int8"), dtype="Int32") + assert result.dtype == Int32Dtype() + + +def test_to_integer_array_float(): + result = IntegerArray._from_sequence([1.0, 2.0], dtype="Int64") + expected = pd.array([1, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises(TypeError, match="cannot safely cast non-equivalent"): + IntegerArray._from_sequence([1.5, 2.0], dtype="Int64") + + # for float dtypes, the itemsize is not preserved + result = IntegerArray._from_sequence( + np.array([1.0, 2.0], dtype="float32"), dtype="Int64" + ) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_str(): + result = IntegerArray._from_sequence(["1", "2", None], dtype="Int64") + expected = pd.array([1, 2, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: .*" + ): + IntegerArray._from_sequence(["1", "2", ""], dtype="Int64") + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: 
.*" + ): + IntegerArray._from_sequence(["1.5", "2.0"], dtype="Int64") + + +@pytest.mark.parametrize( + "bool_values, int_values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Int64Dtype(), Int64Dtype()), + ([False, True], [0, 1], "Int64", Int64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()), + ], +) +def test_to_integer_array_bool( + constructor, bool_values, int_values, target_dtype, expected_dtype +): + result = constructor(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(int_values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values, to_dtype, result_dtype", + [ + (np.array([1], dtype="int64"), None, Int64Dtype), + (np.array([1, np.nan]), None, Int64Dtype), + (np.array([1, np.nan]), "int8", Int8Dtype), + ], +) +def test_to_integer_array(values, to_dtype, result_dtype): + # convert existing arrays to IntegerArrays + result = IntegerArray._from_sequence(values, dtype=to_dtype) + assert result.dtype == result_dtype() + expected = pd.array(values, dtype=result_dtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_integer_array_from_boolean(): + # GH31104 + expected = pd.array(np.array([True, False]), dtype="Int64") + result = pd.array(np.array([True, False], dtype=object), dtype="Int64") + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..8620763988e0676e00d8e61affa75848fe38dd48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py @@ -0,0 +1,294 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.generic import ABCIndex + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.integer import ( + Int8Dtype, + UInt32Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == "i" + else: + assert np.dtype(dtype.type).kind == "u" + assert dtype.name is not None + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + # for ops that enable (mean would actually work here + # but generally it is a float return value) + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + + # op + result = getattr(df.C, op)() + if op in {"sum", "prod", "min", "max"}: + assert isinstance(result, np.int64) + else: + assert isinstance(result, int) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +def test_astype_nansafe(): + # see gh-22343 + arr = pd.array([np.nan, 1, 2], dtype="Int8") + msg = "cannot convert NA to integer" + + with pytest.raises(ValueError, match=msg): + arr.astype("uint32") + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_construct_index(all_data, dropna): + # ensure that we do not coerce to different Index dtype or non-index + + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Index(pd.array(other, dtype=all_data.dtype)) + 
expected = pd.Index(other, dtype=all_data.dtype) + assert all_data.dtype == expected.dtype # dont coerce to object + + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_astype_index(all_data, dropna): + # as an int/uint index to Index + + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + dtype = all_data.dtype + idx = pd.Index(np.array(other)) + assert isinstance(idx, ABCIndex) + + result = idx.astype(dtype) + expected = idx.astype(object).astype(dtype) + tm.assert_index_equal(result, expected) + + +def test_astype(all_data): + all_data = all_data[:10] + + ints = all_data[~all_data.isna()] + mixed = all_data + dtype = Int8Dtype() + + # coerce to same type - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype) + expected = pd.Series(ints) + tm.assert_series_equal(result, expected) + + # coerce to same other - ints + s = pd.Series(ints) + result = s.astype(dtype) + expected = pd.Series(ints, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype.numpy_dtype) + expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype)) + tm.assert_series_equal(result, expected) + + # coerce to same type - mixed + s = pd.Series(mixed) + result = s.astype(all_data.dtype) + expected = pd.Series(mixed) + tm.assert_series_equal(result, expected) + + # coerce to same other - mixed + s = pd.Series(mixed) + result = s.astype(dtype) + expected = pd.Series(mixed, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - mixed + s = pd.Series(mixed) + msg = "cannot convert NA to integer" + with pytest.raises(ValueError, match=msg): + s.astype(all_data.dtype.numpy_dtype) + + # coerce to object + s = pd.Series(mixed) + result = s.astype("object") + expected = pd.Series(np.asarray(mixed, dtype=object)) + tm.assert_series_equal(result, expected) + + +def test_astype_copy(): + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Int64", copy=True) + assert result is not arr + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Int64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + result = arr.astype("Int32", copy=False) + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_to_larger_numpy(): + a = pd.array([1, 2], dtype="Int32") + result = a.astype("int64") + expected = np.array([1, 2], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + a = pd.array([1, 2], dtype="UInt32") + result = a.astype("uint64") + expected = np.array([1, 2], dtype="uint64") + tm.assert_numpy_array_equal(result, expected) + + 
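# Editor's sketch (illustration only, not from the pandas file above): the
# copy semantics exercised by test_astype_copy. Casting to the same nullable
# dtype with copy=False hands back the original object, while casting to a
# different dtype always copies both data and mask, so mutating the result
# leaves the source array untouched.
import pandas as pd

arr = pd.array([1, 2, 3, None], dtype="Int64")

same = arr.astype("Int64", copy=False)
assert same is arr  # same dtype, copy=False: no copy is made

narrower = arr.astype("Int32", copy=False)
narrower[0] = 99
assert arr[0] == 1  # cross-dtype astype copied, the original is unchanged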
+@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"]) +def test_astype_specific_casting(dtype): + s = pd.Series([1, 2, 3], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + s = pd.Series([1, 2, 3, None], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_astype_floating(): + arr = pd.array([1, 2, None], dtype="Int64") + result = arr.astype("Float64") + expected = pd.array([1.0, 2.0, None], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_dt64(): + # GH#32435 + arr = pd.array([1, 2, 3, pd.NA]) * 10**9 + + result = arr.astype("datetime64[ns]") + + expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +def test_construct_cast_invalid(dtype): + msg = "cannot safely" + arr = [1.2, 2.3, 3.7] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + arr = [1.2, 2.3, 3.7, np.nan] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + +@pytest.mark.parametrize("in_series", [True, False]) +def test_to_numpy_na_nan(in_series): + a = pd.array([0, 1, None], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([0.0, 1.0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="int64", na_value=-1) + expected = np.array([0, 1, -1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="bool", na_value=False) + expected = np.array([False, True, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("in_series", [True, False]) +@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"]) +def test_to_numpy_dtype(dtype, in_series): + a = pd.array([0, 1], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype=dtype) + expected = np.array([0, 1], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64", "bool"]) +def test_to_numpy_na_raises(dtype): + a = pd.array([0, 1, None], dtype="Int64") + with pytest.raises(ValueError, match=dtype): + a.to_numpy(dtype=dtype) + + +def test_astype_str(): + a = pd.array([1, 2, None], dtype="Int64") + expected = np.array(["1", "2", ""], dtype=f"{tm.ENDIAN}U21") + + tm.assert_numpy_array_equal(a.astype(str), expected) + tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_boolean(): + # https://github.com/pandas-dev/pandas/issues/31102 + a = pd.array([1, 0, -1, 2, None], dtype="Int64") + result = a.astype("boolean") + expected = pd.array([True, False, True, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py new file mode 100644 index 0000000000000000000000000000000000000000..d48b636a98feb94c5eaeee9eefbf5730fe9c6f79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py @@ -0,0 +1,203 @@ +import numpy as np +import 
pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, +@pytest.mark.filterwarnings("ignore:invalid value encountered in sign:RuntimeWarning") +def test_ufuncs_single_int(ufunc): + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64")) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1, 2, -3, np.nan]) + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask) + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_int(ufunc): + # two IntegerArrays + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_ufunc_binary_output(): + a = pd.array([1, 2, np.nan]) + result = np.modf(a) + expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float")) + expected = (pd.array(expected[0]), pd.array(expected[1])) + + assert isinstance(result, tuple) + assert len(result) == 2 + + for x, y in zip(result, expected): + tm.assert_extension_array_equal(x, y) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values) + + res = np.add.reduce(arr) + expected = arr.sum(skipna=False) + tm.assert_almost_equal(res, expected) + + +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("std", {"ddof": 0}), + ("std", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) + assert expected == result + + +def test_value_counts_na(): + arr = pd.array([1, 2, 1, pd.NA], dtype="Int64") + result = arr.value_counts(dropna=False) + ex_index = pd.Index([1, 2, pd.NA], dtype="Int64") + assert ex_index.dtype == "Int64" + expected = pd.Series([2, 1, 1], index=ex_index, dtype="Int64", 
name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=arr[:2], dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + # https://github.com/pandas-dev/pandas/issues/33317 + ser = pd.Series([], dtype="Int64") + result = ser.value_counts() + idx = pd.Index([], dtype=ser.dtype) + assert idx.dtype == ser.dtype + expected = pd.Series([], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + # GH 33172 + ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64") + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3 + assert expected.index.dtype == ser.dtype + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_integer_array_sum(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, 3, None], dtype=dtype) + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6 + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_integer_array_min_max(skipna, method, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([0, 1, None], dtype=dtype) + func = getattr(arr, method) + result = func(skipna=skipna) + if skipna: + assert result == (0 if method == "min" else 1) + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 9]) +def test_integer_array_prod(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, None], dtype=dtype) + result = arr.prod(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 2 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)] +) +def test_integer_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Int64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"]) +def test_dataframe_reductions(op): + # https://github.com/pandas-dev/pandas/pull/32867 + # ensure the integers are not cast to float during reductions + df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")}) + result = df.max() + assert isinstance(result["a"], np.int64) + + +# TODO(jreback) - these need testing / are broken + +# shift + +# set_index (destroys type) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..db04862e4ea0797ffcc1562bbcb603448131fae8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py @@ -0,0 +1,125 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + array, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", np.int64(3)], + ["prod", np.int64(2)], + ["min", np.int64(1)], + ["max", np.int64(2)], + ["mean", np.float64(1.5)], + ["median", np.float64(1.5)], + ["var", 
np.float64(0.5)], + ["std", np.float64(0.5**0.5)], + ["skew", pd.NA], + ["kurt", pd.NA], + ["any", True], + ["all", True], + ], +) +def test_series_reductions(op, expected): + ser = Series([1, 2], dtype="Int64") + result = getattr(ser, op)() + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([3], index=["a"], dtype="Int64")], + ["prod", Series([2], index=["a"], dtype="Int64")], + ["min", Series([1], index=["a"], dtype="Int64")], + ["max", Series([2], index=["a"], dtype="Int64")], + ["mean", Series([1.5], index=["a"], dtype="Float64")], + ["median", Series([1.5], index=["a"], dtype="Float64")], + ["var", Series([0.5], index=["a"], dtype="Float64")], + ["std", Series([0.5**0.5], index=["a"], dtype="Float64")], + ["skew", Series([pd.NA], index=["a"], dtype="Float64")], + ["kurt", Series([pd.NA], index=["a"], dtype="Float64")], + ["any", Series([True], index=["a"], dtype="boolean")], + ["all", Series([True], index=["a"], dtype="boolean")], + ], +) +def test_dataframe_reductions(op, expected): + df = DataFrame({"a": array([1, 2], dtype="Int64")}) + result = getattr(df, op)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", array([1, 3], dtype="Int64")], + ["prod", array([1, 3], dtype="Int64")], + ["min", array([1, 3], dtype="Int64")], + ["max", array([1, 3], dtype="Int64")], + ["mean", array([1, 3], dtype="Float64")], + ["median", array([1, 3], dtype="Float64")], + ["var", array([pd.NA], dtype="Float64")], + ["std", array([pd.NA], dtype="Float64")], + ["skew", array([pd.NA], dtype="Float64")], + ["any", array([True, True], dtype="boolean")], + ["all", array([True, True], dtype="boolean")], + ], +) +def test_groupby_reductions(op, expected): + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": array([1, None, 3], dtype="Int64"), + } + ) + result = getattr(df.groupby("A"), op)() + expected = DataFrame(expected, index=pd.Index(["a", "b"], name="A"), columns=["B"]) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([4, 4], index=["B", "C"], dtype="Float64")], + ["prod", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["min", Series([1, 1], index=["B", "C"], dtype="Float64")], + ["max", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["mean", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["median", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["var", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["std", Series([2**0.5, 2**0.5], index=["B", "C"], dtype="Float64")], + ["skew", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["kurt", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["any", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ["all", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ], +) +def test_mixed_reductions(op, expected, using_infer_string): + if op in ["any", "all"] and using_infer_string: + expected = expected.astype("bool") + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": array([1, None, 3], dtype="Int64"), + } + ) + + # series + result = getattr(df.C, op)() + tm.assert_equal(result, expected["C"]) + + # frame + if op in ["any", "all"]: + result = getattr(df, op)() + else: + result = getattr(df, op)(numeric_only=True) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py
new file mode 100644
index 0000000000000000000000000000000000000000..168210eed5d06a461bbf42dd1e1fae3db0fd851c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py
@@ -0,0 +1,67 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    Int16Dtype,
+    Int32Dtype,
+    Int64Dtype,
+    UInt8Dtype,
+    UInt16Dtype,
+    UInt32Dtype,
+    UInt64Dtype,
+)
+
+
+def test_dtypes(dtype):
+    # smoke tests on auto dtype construction
+
+    if dtype.is_signed_integer:
+        assert np.dtype(dtype.type).kind == "i"
+    else:
+        assert np.dtype(dtype.type).kind == "u"
+    assert dtype.name is not None
+
+
+@pytest.mark.parametrize(
+    "dtype, expected",
+    [
+        (Int8Dtype(), "Int8Dtype()"),
+        (Int16Dtype(), "Int16Dtype()"),
+        (Int32Dtype(), "Int32Dtype()"),
+        (Int64Dtype(), "Int64Dtype()"),
+        (UInt8Dtype(), "UInt8Dtype()"),
+        (UInt16Dtype(), "UInt16Dtype()"),
+        (UInt32Dtype(), "UInt32Dtype()"),
+        (UInt64Dtype(), "UInt64Dtype()"),
+    ],
+)
+def test_repr_dtype(dtype, expected):
+    assert repr(dtype) == expected
+
+
+def test_repr_array():
+    result = repr(pd.array([1, None, 3]))
+    expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
+    assert result == expected
+
+
+def test_repr_array_long():
+    data = pd.array([1, 2, None] * 1000)
+    expected = (
+        "<IntegerArray>\n"
+        "[   1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,\n"
+        " ...\n"
+        " <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>]\n"
+        "Length: 3000, dtype: Int64"
+    )
+    result = repr(data)
+    assert result == expected
+
+
+def test_frame_repr(data_missing):
+    df = pd.DataFrame({"A": data_missing})
+    result = repr(df)
+    expected = "      A\n0  <NA>\n1     1"
+    assert result == expected
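# Editor's sketch (illustration only): the reprs asserted in the file above
# come straight from the array's string form, with missing values rendered
# as "<NA>".
import pandas as pd

print(repr(pd.array([1, None, 3])))
# <IntegerArray>
# [1, <NA>, 3]
# Length: 3, dtype: Int64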
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff7d34c85c01599707e648c4d9964773d16a13fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py
@@ -0,0 +1,130 @@
+from io import (
BytesIO, + StringIO, +) + +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + + +def test_compression_roundtrip(compression): + df = pd.DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + tm.assert_frame_equal(df, pd.read_json(path, compression=compression)) + + # explicitly ensure file was compressed. + with tm.decompress_file(path, compression) as fh: + result = fh.read().decode("utf8") + data = StringIO(result) + tm.assert_frame_equal(df, pd.read_json(data)) + + +def test_read_zipped_json(datapath): + uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json") + uncompressed_df = pd.read_json(uncompressed_path) + + compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip") + compressed_df = pd.read_json(compressed_path, compression="zip") + + tm.assert_frame_equal(uncompressed_df, compressed_df) + + +@td.skip_if_not_us_locale +@pytest.mark.single_cpu +def test_with_s3_url(compression, s3_public_bucket, s3so): + # Bucket created in tests/io/conftest.py + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + with open(path, "rb") as f: + s3_public_bucket.put_object(Key="test-1", Body=f) + + roundtripped_df = pd.read_json( + f"s3://{s3_public_bucket.name}/test-1", + compression=compression, + storage_options=s3so, + ) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_lines_with_compression(compression): + with tm.ensure_clean() as path: + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + df.to_json(path, orient="records", lines=True, compression=compression) + roundtripped_df = pd.read_json(path, lines=True, compression=compression) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_chunksize_with_compression(compression): + with tm.ensure_clean() as path: + df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')) + df.to_json(path, orient="records", lines=True, compression=compression) + + with pd.read_json( + path, lines=True, chunksize=1, compression=compression + ) as res: + roundtripped_df = pd.concat(res) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_write_unsupported_compression_type(): + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + with pytest.raises(ValueError, match=msg): + df.to_json(path, compression="unsupported") + + +def test_read_unsupported_compression_type(): + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + with pytest.raises(ValueError, match=msg): + pd.read_json(path, compression="unsupported") + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize("to_infer", [True, False]) +@pytest.mark.parametrize("read_infer", [True, False]) +def test_to_json_compression( + compression_only, read_infer, to_infer, compression_to_extension, infer_string +): + with pd.option_context("future.infer_string", infer_string): + # see gh-15008 + compression = compression_only + + # We'll complete file extension subsequently. + filename = "test." 
+ filename += compression_to_extension[compression] + + df = pd.DataFrame({"A": [1]}) + + to_compression = "infer" if to_infer else compression + read_compression = "infer" if read_infer else compression + + with tm.ensure_clean(filename) as path: + df.to_json(path, compression=to_compression) + result = pd.read_json(path, compression=read_compression) + tm.assert_frame_equal(result, df) + + +def test_to_json_compression_mode(compression): + # GH 39985 (read_json does not support user-provided binary files) + expected = pd.DataFrame({"A": [1]}) + + with BytesIO() as buffer: + expected.to_json(buffer, compression=compression) + # df = pd.read_json(buffer, compression=compression) + # tm.assert_frame_equal(expected, df) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py new file mode 100644 index 0000000000000000000000000000000000000000..cc88fc3ba18263ac78f7057bfb5950f0420646b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py @@ -0,0 +1,21 @@ +""" +Tests for the deprecated keyword arguments for `read_json`. +""" +from io import StringIO + +import pandas as pd +import pandas._testing as tm + +from pandas.io.json import read_json + + +def test_good_kwargs(): + df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2]) + + with tm.assert_produces_warning(None): + data1 = StringIO(df.to_json(orient="split")) + tm.assert_frame_equal(df, read_json(data1, orient="split")) + data2 = StringIO(df.to_json(orient="columns")) + tm.assert_frame_equal(df, read_json(data2, orient="columns")) + data3 = StringIO(df.to_json(orient="index")) + tm.assert_frame_equal(df, read_json(data3, orient="index")) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..cc101bb9c8b6d7c5f230b408ecf060a4af520106 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py @@ -0,0 +1,873 @@ +"""Tests for Table Schema integration.""" +from collections import OrderedDict +from io import StringIO +import json + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + PeriodDtype, +) + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.json._table_schema import ( + as_json_table_type, + build_table_schema, + convert_json_field_to_pandas_type, + convert_pandas_type_to_json_field, + set_default_names, +) + + +@pytest.fixture +def df_schema(): + return DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "c"], + "C": pd.date_range("2016-01-01", freq="d", periods=4), + "D": pd.timedelta_range("1h", periods=4, freq="min"), + }, + index=pd.Index(range(4), name="idx"), + ) + + +@pytest.fixture +def df_table(): + return DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "c"], + "C": pd.date_range("2016-01-01", freq="d", periods=4), + "D": pd.timedelta_range("1h", periods=4, freq="min"), + "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])), + "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)), + "G": [1.0, 2.0, 3, 4.0], + "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"), + }, + index=pd.Index(range(4), name="idx"), + ) + + +class TestBuildSchema: + def 
test_build_table_schema(self, df_schema, using_infer_string): + result = build_table_schema(df_schema, version=False) + expected = { + "fields": [ + {"name": "idx", "type": "integer"}, + {"name": "A", "type": "integer"}, + {"name": "B", "type": "string"}, + {"name": "C", "type": "datetime"}, + {"name": "D", "type": "duration"}, + ], + "primaryKey": ["idx"], + } + if using_infer_string: + expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"} + assert result == expected + result = build_table_schema(df_schema) + assert "pandas_version" in result + + def test_series(self): + s = pd.Series([1, 2, 3], name="foo") + result = build_table_schema(s, version=False) + expected = { + "fields": [ + {"name": "index", "type": "integer"}, + {"name": "foo", "type": "integer"}, + ], + "primaryKey": ["index"], + } + assert result == expected + result = build_table_schema(s) + assert "pandas_version" in result + + def test_series_unnamed(self): + result = build_table_schema(pd.Series([1, 2, 3]), version=False) + expected = { + "fields": [ + {"name": "index", "type": "integer"}, + {"name": "values", "type": "integer"}, + ], + "primaryKey": ["index"], + } + assert result == expected + + def test_multiindex(self, df_schema, using_infer_string): + df = df_schema + idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)]) + df.index = idx + + result = build_table_schema(df, version=False) + expected = { + "fields": [ + {"name": "level_0", "type": "string"}, + {"name": "level_1", "type": "integer"}, + {"name": "A", "type": "integer"}, + {"name": "B", "type": "string"}, + {"name": "C", "type": "datetime"}, + {"name": "D", "type": "duration"}, + ], + "primaryKey": ["level_0", "level_1"], + } + if using_infer_string: + expected["fields"][0] = { + "name": "level_0", + "type": "any", + "extDtype": "string", + } + expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"} + assert result == expected + + df.index.names = ["idx0", None] + expected["fields"][0]["name"] = "idx0" + expected["primaryKey"] = ["idx0", "level_1"] + result = build_table_schema(df, version=False) + assert result == expected + + +class TestTableSchemaType: + @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64]) + def test_as_json_table_type_int_data(self, int_type): + int_data = [1, 2, 3] + assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer" + + @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_data(self, float_type): + float_data = [1.0, 2.0, 3.0] + assert ( + as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number" + ) + + @pytest.mark.parametrize("bool_type", [bool, np.bool_]) + def test_as_json_table_type_bool_data(self, bool_type): + bool_data = [True, False] + assert ( + as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean" + ) + + @pytest.mark.parametrize( + "date_data", + [ + pd.to_datetime(["2016"]), + pd.to_datetime(["2016"], utc=True), + pd.Series(pd.to_datetime(["2016"])), + pd.Series(pd.to_datetime(["2016"], utc=True)), + pd.period_range("2016", freq="Y", periods=3), + ], + ) + def test_as_json_table_type_date_data(self, date_data): + assert as_json_table_type(date_data.dtype) == "datetime" + + @pytest.mark.parametrize( + "str_data", + [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)], + ) + def test_as_json_table_type_string_data(self, str_data): + assert as_json_table_type(str_data.dtype) == "string" + + 
@pytest.mark.parametrize( + "cat_data", + [ + pd.Categorical(["a"]), + pd.Categorical([1]), + pd.Series(pd.Categorical([1])), + pd.CategoricalIndex([1]), + pd.Categorical([1]), + ], + ) + def test_as_json_table_type_categorical_data(self, cat_data): + assert as_json_table_type(cat_data.dtype) == "any" + + # ------ + # dtypes + # ------ + @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64]) + def test_as_json_table_type_int_dtypes(self, int_dtype): + assert as_json_table_type(int_dtype) == "integer" + + @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_dtypes(self, float_dtype): + assert as_json_table_type(float_dtype) == "number" + + @pytest.mark.parametrize("bool_dtype", [bool, np.bool_]) + def test_as_json_table_type_bool_dtypes(self, bool_dtype): + assert as_json_table_type(bool_dtype) == "boolean" + + @pytest.mark.parametrize( + "date_dtype", + [ + np.dtype("=1" + + with pytest.raises(ValueError, match=msg): + with read_json( + StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine + ) as _: + pass + + +@pytest.mark.parametrize("chunksize", [None, 1, 2]) +def test_readjson_chunks_multiple_empty_lines(chunksize): + j = """ + + {"A":1,"B":4} + + + + {"A":2,"B":5} + + + + + + + + {"A":3,"B":6} + """ + orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + test = read_json(StringIO(j), lines=True, chunksize=chunksize) + if chunksize is not None: + with test: + test = pd.concat(test) + tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}") + + +def test_readjson_unicode(request, monkeypatch, engine): + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + with tm.ensure_clean("test.json") as path: + monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949") + with open(path, "w", encoding="utf-8") as f: + f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}') + + result = read_json(path, engine=engine) + expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("nrows", [1, 2]) +def test_readjson_nrows(nrows, engine): + # GH 33916 + # Test reading line-format JSON to Series with nrows param + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + result = read_json(StringIO(jsonl), lines=True, nrows=nrows) + expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)]) +def test_readjson_nrows_chunks(request, nrows, chunksize, engine): + # GH 33916 + # Test reading line-format JSON to Series with nrows and chunksize param + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." 
+ ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + + if engine != "pyarrow": + with read_json( + StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine + ) as reader: + chunked = pd.concat(reader) + else: + with read_json( + jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine + ) as reader: + chunked = pd.concat(reader) + expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows] + tm.assert_frame_equal(chunked, expected) + + +def test_readjson_nrows_requires_lines(engine): + # GH 33916 + # Test ValueError raised if nrows is set without setting lines in read_json + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + msg = "nrows can only be passed if lines=True" + with pytest.raises(ValueError, match=msg): + read_json(jsonl, lines=False, nrows=2, engine=engine) + + +def test_readjson_lines_chunks_fileurl(request, datapath, engine): + # GH 27135 + # Test reading line-format JSON from file url + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json " + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + df_list_expected = [ + DataFrame([[1, 2]], columns=["a", "b"], index=[0]), + DataFrame([[3, 4]], columns=["a", "b"], index=[1]), + DataFrame([[5, 6]], columns=["a", "b"], index=[2]), + ] + os_path = datapath("io", "json", "data", "line_delimited.json") + file_url = Path(os_path).as_uri() + with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader: + for index, chunk in enumerate(url_reader): + tm.assert_frame_equal(chunk, df_list_expected[index]) + + +def test_chunksize_is_incremental(): + # See https://github.com/pandas-dev/pandas/issues/34548 + jsonl = ( + """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}\n""" + * 1000 + ) + + class MyReader: + def __init__(self, contents) -> None: + self.read_count = 0 + self.stringio = StringIO(contents) + + def read(self, *args): + self.read_count += 1 + return self.stringio.read(*args) + + def __iter__(self) -> Iterator: + self.read_count += 1 + return iter(self.stringio) + + reader = MyReader(jsonl) + assert len(list(read_json(reader, lines=True, chunksize=100))) > 1 + assert reader.read_count > 10 + + +@pytest.mark.parametrize("orient_", ["split", "index", "table"]) +def test_to_json_append_orient(orient_): + # GH 35849 + # Test ValueError when orient is not 'records' + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + r"mode='a' \(append\) is only supported when " + "lines is True and orient is 'records'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode="a", orient=orient_) + + +def test_to_json_append_lines(): + # GH 35849 + # Test ValueError when lines is not True + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + r"mode='a' \(append\) is only supported when " + "lines is True and orient is 'records'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode="a", lines=False, orient="records") + + +@pytest.mark.parametrize("mode_", ["r", "x"]) +def test_to_json_append_mode(mode_): + # GH 35849 + # Test ValueError when mode is not supported option + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + f"mode={mode_} is not a valid option." + "Only 'w' and 'a' are currently supported."
+ ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode=mode_, lines=False, orient="records") + + +def test_to_json_append_output_consistent_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing same columns, new rows + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + + expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_inconsistent_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing one new column, one old column, new rows + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + + expected = DataFrame( + { + "col1": [1, 2, None, None], + "col2": ["a", "b", "e", "f"], + "col3": [np.nan, np.nan, "!", "#"], + } + ) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_different_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing same, differing and new columns + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + df4 = DataFrame({"col4": [True, False]}) + + expected = DataFrame( + { + "col1": [1, 2, 3, 4, None, None, None, None], + "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan], + "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan], + "col4": [None, None, None, None, None, None, True, False], + } + ).astype({"col4": "float"}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, mode="a", lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + df4.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_different_columns_reordered(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing specific result column order. 
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + df4 = DataFrame({"col4": [True, False]}) + + # df4, df3, df2, df1 (in that order) + expected = DataFrame( + { + "col4": [True, False, None, None, None, None, None, None], + "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"], + "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan], + "col1": [None, None, None, None, 3, 4, 1, 2], + } + ).astype({"col4": "float"}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df4.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + df1.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py new file mode 100644 index 0000000000000000000000000000000000000000..56ea9ea625dff721a2e989d858ae89c22aa69f4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py @@ -0,0 +1,1087 @@ +import calendar +import datetime +import decimal +import json +import locale +import math +import re +import time + +import dateutil +import numpy as np +import pytest +import pytz + +import pandas._libs.json as ujson +from pandas.compat import IS64 + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + PeriodIndex, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def _clean_dict(d): + """ + Sanitize dictionary for JSON by converting all keys to strings. + + Parameters + ---------- + d : dict + The dictionary to convert. + + Returns + ------- + cleaned_dict : dict + """ + return {str(k): v for k, v in d.items()} + + +@pytest.fixture( + params=[None, "split", "records", "values", "index"] # Column indexed by default. 
+) +def orient(request): + return request.param + + +class TestUltraJSONTests: + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") + def test_encode_decimal(self): + sut = decimal.Decimal("1337.1337") + encoded = ujson.ujson_dumps(sut, double_precision=15) + decoded = ujson.ujson_loads(encoded) + assert decoded == 1337.1337 + + sut = decimal.Decimal("0.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.94") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "0.9" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 0.9 + + sut = decimal.Decimal("1.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "2.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 2.0 + + sut = decimal.Decimal("-1.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "-2.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == -2.0 + + sut = decimal.Decimal("0.995") + encoded = ujson.ujson_dumps(sut, double_precision=2) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.9995") + encoded = ujson.ujson_dumps(sut, double_precision=3) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.99999999999999944") + encoded = ujson.ujson_dumps(sut, double_precision=15) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + @pytest.mark.parametrize("ensure_ascii", [True, False]) + def test_encode_string_conversion(self, ensure_ascii): + string_input = "A string \\ / \b \f \n \r \t &" + not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"' + html_encoded = ( + '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"' + ) + + def helper(expected_output, **encode_kwargs): + output = ujson.ujson_dumps( + string_input, ensure_ascii=ensure_ascii, **encode_kwargs + ) + + assert output == expected_output + assert string_input == json.loads(output) + assert string_input == ujson.ujson_loads(output) + + # Default behavior assumes encode_html_chars=False. + helper(not_html_encoded) + + # Make sure explicit encode_html_chars=False works. + helper(not_html_encoded, encode_html_chars=False) + + # Make sure explicit encode_html_chars=True does the encoding. + helper(html_encoded, encode_html_chars=True) + + @pytest.mark.parametrize( + "long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388] + ) + def test_double_long_numbers(self, long_number): + sut = {"a": long_number} + encoded = ujson.ujson_dumps(sut, double_precision=15) + + decoded = ujson.ujson_loads(encoded) + assert sut == decoded + + def test_encode_non_c_locale(self): + lc_category = locale.LC_NUMERIC + + # We just need one of these locales to work. 
+ for new_locale in ("it_IT.UTF-8", "Italian_Italy"): + if tm.can_set_locale(new_locale, lc_category): + with tm.set_locale(new_locale, lc_category): + assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60 + assert ujson.ujson_loads("4.78", precise_float=True) == 4.78 + break + + def test_decimal_decode_test_precise(self): + sut = {"a": 4.56} + encoded = ujson.ujson_dumps(sut) + decoded = ujson.ujson_loads(encoded, precise_float=True) + assert sut == decoded + + def test_encode_double_tiny_exponential(self): + num = 1e-40 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = 1e-100 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = -1e-45 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = -1e-145 + assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num))) + + @pytest.mark.parametrize("unicode_key", ["key1", "بن"]) + def test_encode_dict_with_unicode_keys(self, unicode_key): + unicode_dict = {unicode_key: "value1"} + assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict)) + + @pytest.mark.parametrize( + "double_input", [math.pi, -math.pi] # Should work with negatives too. + ) + def test_encode_double_conversion(self, double_input): + output = ujson.ujson_dumps(double_input) + assert round(double_input, 5) == round(json.loads(output), 5) + assert round(double_input, 5) == round(ujson.ujson_loads(output), 5) + + def test_encode_with_decimal(self): + decimal_input = 1.0 + output = ujson.ujson_dumps(decimal_input) + + assert output == "1.0" + + def test_encode_array_of_nested_arrays(self): + nested_input = [[[[]]]] * 20 + output = ujson.ujson_dumps(nested_input) + + assert nested_input == json.loads(output) + assert nested_input == ujson.ujson_loads(output) + + def test_encode_array_of_doubles(self): + doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10 + output = ujson.ujson_dumps(doubles_input) + + assert doubles_input == json.loads(output) + assert doubles_input == ujson.ujson_loads(output) + + def test_double_precision(self): + double_input = 30.012345678901234 + output = ujson.ujson_dumps(double_input, double_precision=15) + + assert double_input == json.loads(output) + assert double_input == ujson.ujson_loads(output) + + for double_precision in (3, 9): + output = ujson.ujson_dumps(double_input, double_precision=double_precision) + rounded_input = round(double_input, double_precision) + + assert rounded_input == json.loads(output) + assert rounded_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "invalid_val", + [ + 20, + -1, + "9", + None, + ], + ) + def test_invalid_double_precision(self, invalid_val): + double_input = 30.12345678901234567890 + expected_exception = ValueError if isinstance(invalid_val, int) else TypeError + msg = ( + r"Invalid value '.*' for option 'double_precision', max is '15'|" + r"an integer is required \(got type |" + r"object cannot be interpreted as an integer" + ) + with pytest.raises(expected_exception, match=msg): + ujson.ujson_dumps(double_input, double_precision=invalid_val) + + def test_encode_string_conversion2(self): + string_input = "A string \\ / \b \f \n \r \t" + output = ujson.ujson_dumps(string_input) + + assert string_input == json.loads(output) + assert string_input == ujson.ujson_loads(output) + assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"' + + @pytest.mark.parametrize( + "unicode_input", + ["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"], + ) + def test_encode_unicode_conversion(self, 
unicode_input): + enc = ujson.ujson_dumps(unicode_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(unicode_input) + assert dec == json.loads(enc) + + def test_encode_control_escaping(self): + escaped_input = "\x19" + enc = ujson.ujson_dumps(escaped_input) + dec = ujson.ujson_loads(enc) + + assert escaped_input == dec + assert enc == json.dumps(escaped_input) + + def test_encode_unicode_surrogate_pair(self): + surrogate_input = "\xf0\x90\x8d\x86" + enc = ujson.ujson_dumps(surrogate_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(surrogate_input) + assert dec == json.loads(enc) + + def test_encode_unicode_4bytes_utf8(self): + four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL" + enc = ujson.ujson_dumps(four_bytes_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(four_bytes_input) + assert dec == json.loads(enc) + + def test_encode_unicode_4bytes_utf8highest(self): + four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL" + enc = ujson.ujson_dumps(four_bytes_input) + + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(four_bytes_input) + assert dec == json.loads(enc) + + def test_encode_unicode_error(self): + string = "'\udac0'" + msg = ( + r"'utf-8' codec can't encode character '\\udac0' " + r"in position 1: surrogates not allowed" + ) + with pytest.raises(UnicodeEncodeError, match=msg): + ujson.ujson_dumps([string]) + + def test_encode_array_in_array(self): + arr_in_arr_input = [[[[]]]] + output = ujson.ujson_dumps(arr_in_arr_input) + + assert arr_in_arr_input == json.loads(output) + assert output == json.dumps(arr_in_arr_input) + assert arr_in_arr_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "num_input", + [ + 31337, + -31337, # Negative number. + -9223372036854775808, # Large negative number. 
+ ], + ) + def test_encode_num_conversion(self, num_input): + output = ujson.ujson_dumps(num_input) + assert num_input == json.loads(output) + assert output == json.dumps(num_input) + assert num_input == ujson.ujson_loads(output) + + def test_encode_list_conversion(self): + list_input = [1, 2, 3, 4] + output = ujson.ujson_dumps(list_input) + + assert list_input == json.loads(output) + assert list_input == ujson.ujson_loads(output) + + def test_encode_dict_conversion(self): + dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4} + output = ujson.ujson_dumps(dict_input) + + assert dict_input == json.loads(output) + assert dict_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("builtin_value", [None, True, False]) + def test_encode_builtin_values_conversion(self, builtin_value): + output = ujson.ujson_dumps(builtin_value) + assert builtin_value == json.loads(output) + assert output == json.dumps(builtin_value) + assert builtin_value == ujson.ujson_loads(output) + + def test_encode_datetime_conversion(self): + datetime_input = datetime.datetime.fromtimestamp(time.time()) + output = ujson.ujson_dumps(datetime_input, date_unit="s") + expected = calendar.timegm(datetime_input.utctimetuple()) + + assert int(expected) == json.loads(output) + assert int(expected) == ujson.ujson_loads(output) + + def test_encode_date_conversion(self): + date_input = datetime.date.fromtimestamp(time.time()) + output = ujson.ujson_dumps(date_input, date_unit="s") + + tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0) + expected = calendar.timegm(tup) + + assert int(expected) == json.loads(output) + assert int(expected) == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "test", + [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)], + ) + def test_encode_time_conversion_basic(self, test): + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + def test_encode_time_conversion_pytz(self): + # see gh-11473: to_json segfaults with timezone-aware datetimes + test = datetime.time(10, 12, 15, 343243, pytz.utc) + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + def test_encode_time_conversion_dateutil(self): + # see gh-11473: to_json segfaults with timezone-aware datetimes + test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc()) + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + @pytest.mark.parametrize( + "decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf] + ) + def test_encode_as_null(self, decoded_input): + assert ujson.ujson_dumps(decoded_input) == "null", "Expected null" + + def test_datetime_units(self): + val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504) + stamp = Timestamp(val).as_unit("ns") + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s")) + assert roundtrip == stamp._value // 10**9 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms")) + assert roundtrip == stamp._value // 10**6 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us")) + assert roundtrip == stamp._value // 10**3 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns")) + assert roundtrip == stamp._value + + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ujson.ujson_dumps(val, date_unit="foo") + + def test_encode_to_utf8(self): + unencoded = "\xe6\x97\xa5\xd1\x88" + + enc = 
ujson.ujson_dumps(unencoded, ensure_ascii=False) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(unencoded, ensure_ascii=False) + assert dec == json.loads(enc) + + def test_decode_from_unicode(self): + unicode_input = '{"obj": 31337}' + + dec1 = ujson.ujson_loads(unicode_input) + dec2 = ujson.ujson_loads(str(unicode_input)) + + assert dec1 == dec2 + + def test_encode_recursion_max(self): + # 8 is the max recursion depth + + class O2: + member = 0 + + class O1: + member = 0 + + decoded_input = O1() + decoded_input.member = O2() + decoded_input.member.member = decoded_input + + with pytest.raises(OverflowError, match="Maximum recursion level reached"): + ujson.ujson_dumps(decoded_input) + + def test_decode_jibberish(self): + jibberish = "fdsa sda v9sa fdsa" + msg = "Unexpected character found when decoding 'false'" + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(jibberish) + + @pytest.mark.parametrize( + "broken_json", + [ + "[", # Broken array start. + "{", # Broken object start. + "]", # Broken array end. + "}", # Broken object end. + ], + ) + def test_decode_broken_json(self, broken_json): + msg = "Expected object or value" + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(broken_json) + + @pytest.mark.parametrize("too_big_char", ["[", "{"]) + def test_decode_depth_too_big(self, too_big_char): + with pytest.raises(ValueError, match="Reached object decoding depth limit"): + ujson.ujson_loads(too_big_char * (1024 * 1024)) + + @pytest.mark.parametrize( + "bad_string", + [ + '"TESTING', # Unterminated. + '"TESTING\\"', # Unterminated escape. + "tru", # Broken True. + "fa", # Broken False. + "n", # Broken None. + ], + ) + def test_decode_bad_string(self, bad_string): + msg = ( + "Unexpected character found when decoding|" + "Unmatched ''\"' when when decoding 'string'" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(bad_string) + + @pytest.mark.parametrize( + "broken_json, err_msg", + [ + ( + '{{1337:""}}', + "Key name of object must be 'string' when decoding 'object'", + ), + ('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"), + ("[[[true", "Unexpected character found when decoding array value (2)"), + ], + ) + def test_decode_broken_json_leak(self, broken_json, err_msg): + for _ in range(1000): + with pytest.raises(ValueError, match=re.escape(err_msg)): + ujson.ujson_loads(broken_json) + + @pytest.mark.parametrize( + "invalid_dict", + [ + "{{{{31337}}}}", # No key. + '{{{{"key":}}}}', # No value. + '{{{{"key"}}}}', # No colon or value. + ], + ) + def test_decode_invalid_dict(self, invalid_dict): + msg = ( + "Key name of object must be 'string' when decoding 'object'|" + "No ':' found when decoding object value|" + "Expected object or value" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(invalid_dict) + + @pytest.mark.parametrize( + "numeric_int_as_str", ["31337", "-31337"] # Should work with negatives. 
+ ) + def test_decode_numeric_int(self, numeric_int_as_str): + assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str) + + def test_encode_null_character(self): + wrapped_input = "31337 \x00 1337" + output = ujson.ujson_dumps(wrapped_input) + + assert wrapped_input == json.loads(output) + assert output == json.dumps(wrapped_input) + assert wrapped_input == ujson.ujson_loads(output) + + alone_input = "\x00" + output = ujson.ujson_dumps(alone_input) + + assert alone_input == json.loads(output) + assert output == json.dumps(alone_input) + assert alone_input == ujson.ujson_loads(output) + assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ") + + def test_decode_null_character(self): + wrapped_input = '"31337 \\u0000 31337"' + assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input) + + def test_encode_list_long_conversion(self): + long_input = [ + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + ] + output = ujson.ujson_dumps(long_input) + + assert long_input == json.loads(output) + assert long_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615]) + def test_encode_long_conversion(self, long_input): + output = ujson.ujson_dumps(long_input) + + assert long_input == json.loads(output) + assert output == json.dumps(long_input) + assert long_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1]) + def test_dumps_ints_larger_than_maxsize(self, bigNum): + encoding = ujson.ujson_dumps(bigNum) + assert str(bigNum) == encoding + + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + assert ujson.ujson_loads(encoding) == bigNum + + @pytest.mark.parametrize( + "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"] + ) + def test_decode_numeric_int_exp(self, int_exp): + assert ujson.ujson_loads(int_exp) == json.loads(int_exp) + + def test_loads_non_str_bytes_raises(self): + msg = "a bytes-like object is required, not 'NoneType'" + with pytest.raises(TypeError, match=msg): + ujson.ujson_loads(None) + + @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1]) + def test_decode_number_with_32bit_sign_bit(self, val): + # Test that numbers that fit within 32 bits but would have the + # sign bit set (2**31 <= x < 2**32) are decoded properly. + doc = f'{{"id": {val}}}' + assert ujson.ujson_loads(doc)["id"] == val + + def test_encode_big_escape(self): + # Make sure no Exception is raised. + for _ in range(10): + base = "\u00e5".encode() + escape_input = base * 1024 * 1024 * 2 + ujson.ujson_dumps(escape_input) + + def test_decode_big_escape(self): + # Make sure no Exception is raised. 
+ for _ in range(10): + base = "\u00e5".encode() + quote = b'"' + + escape_input = quote + (base * 1024 * 1024 * 2) + quote + ujson.ujson_loads(escape_input) + + def test_to_dict(self): + d = {"key": 31337} + + class DictTest: + def toDict(self): + return d + + o = DictTest() + output = ujson.ujson_dumps(o) + + dec = ujson.ujson_loads(output) + assert dec == d + + def test_default_handler(self): + class _TestObject: + def __init__(self, val) -> None: + self.val = val + + @property + def recursive_attr(self): + return _TestObject("recursive_attr") + + def __str__(self) -> str: + return str(self.val) + + msg = "Maximum recursion level reached" + with pytest.raises(OverflowError, match=msg): + ujson.ujson_dumps(_TestObject("foo")) + assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str) + + def my_handler(_): + return "foobar" + + assert '"foobar"' == ujson.ujson_dumps( + _TestObject("foo"), default_handler=my_handler + ) + + def my_handler_raises(_): + raise TypeError("I raise for anything") + + with pytest.raises(TypeError, match="I raise for anything"): + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises) + + def my_int_handler(_): + return 42 + + assert ( + ujson.ujson_loads( + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler) + ) + == 42 + ) + + def my_obj_handler(_): + return datetime.datetime(2013, 2, 3) + + assert ujson.ujson_loads( + ujson.ujson_dumps(datetime.datetime(2013, 2, 3)) + ) == ujson.ujson_loads( + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler) + ) + + obj_list = [_TestObject("foo"), _TestObject("bar")] + assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads( + ujson.ujson_dumps(obj_list, default_handler=str) + ) + + def test_encode_object(self): + class _TestObject: + def __init__(self, a, b, _c, d) -> None: + self.a = a + self.b = b + self._c = _c + self.d = d + + def e(self): + return 5 + + # JSON keys should be all non-callable non-underscore attributes, see GH-42768 + test_object = _TestObject(a=1, b=2, _c=3, d=4) + assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == { + "a": 1, + "b": 2, + "d": 4, + } + + def test_ujson__name__(self): + # GH 52898 + assert ujson.__name__ == "pandas._libs.json" + + +class TestNumpyJSONTests: + @pytest.mark.parametrize("bool_input", [True, False]) + def test_bool(self, bool_input): + b = bool(bool_input) + assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b + + def test_bool_array(self): + bool_array = np.array( + [True, False, True, True, False, True, False, False], dtype=bool + ) + output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool) + tm.assert_numpy_array_equal(bool_array, output) + + def test_int(self, any_int_numpy_dtype): + klass = np.dtype(any_int_numpy_dtype).type + num = klass(1) + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_int_array(self, any_int_numpy_dtype): + arr = np.arange(100, dtype=int) + arr_input = arr.astype(any_int_numpy_dtype) + + arr_output = np.array( + ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype + ) + tm.assert_numpy_array_equal(arr_input, arr_output) + + def test_int_max(self, any_int_numpy_dtype): + if any_int_numpy_dtype in ("int64", "uint64") and not IS64: + pytest.skip("Cannot test 64-bit integer on 32-bit platform") + + klass = np.dtype(any_int_numpy_dtype).type + + # uint64 max will always overflow, + # as it's encoded to signed. 
+ if any_int_numpy_dtype == "uint64": + num = np.iinfo("int64").max + else: + num = np.iinfo(any_int_numpy_dtype).max + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_float(self, float_numpy_dtype): + klass = np.dtype(float_numpy_dtype).type + num = klass(256.2013) + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_float_array(self, float_numpy_dtype): + arr = np.arange(12.5, 185.72, 1.7322, dtype=float) + float_input = arr.astype(float_numpy_dtype) + + float_output = np.array( + ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)), + dtype=float_numpy_dtype, + ) + tm.assert_almost_equal(float_input, float_output) + + def test_float_max(self, float_numpy_dtype): + klass = np.dtype(float_numpy_dtype).type + num = klass(np.finfo(float_numpy_dtype).max / 10) + + tm.assert_almost_equal( + klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num + ) + + def test_array_basic(self): + arr = np.arange(96) + arr = arr.reshape((2, 2, 2, 2, 3, 2)) + + tm.assert_numpy_array_equal( + np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr + ) + + @pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)]) + def test_array_reshaped(self, shape): + arr = np.arange(100) + arr = arr.reshape(shape) + + tm.assert_numpy_array_equal( + np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr + ) + + def test_array_list(self): + arr_list = [ + "a", + [], + {}, + {}, + [], + 42, + 97.8, + ["a", "b"], + {"key": "val"}, + ] + arr = np.array(arr_list, dtype=object) + result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object) + tm.assert_numpy_array_equal(result, arr) + + def test_array_float(self): + dtype = np.float32 + + arr = np.arange(100.202, 200.202, 1, dtype=dtype) + arr = arr.reshape((5, 5, 4)) + + arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype) + tm.assert_almost_equal(arr, arr_out) + + def test_0d_array(self): + # gh-18878 + msg = re.escape( + "array(1) (numpy-scalar) is not JSON serializable at the moment" + ) + with pytest.raises(TypeError, match=msg): + ujson.ujson_dumps(np.array(1)) + + def test_array_long_double(self): + msg = re.compile( + "1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment" + ) + with pytest.raises(TypeError, match=msg): + ujson.ujson_dumps(np.longdouble(1234.5)) + + +class TestPandasJSONTests: + def test_dataframe(self, orient): + dtype = np.int64 + + df = DataFrame( + [[1, 2, 3], [4, 5, 6]], + index=["a", "b"], + columns=["x", "y", "z"], + dtype=dtype, + ) + encode_kwargs = {} if orient is None else {"orient": orient} + assert (df.dtypes == dtype).all() + + output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs)) + assert (df.dtypes == dtype).all() + + # Ensure proper DataFrame initialization. + if orient == "split": + dec = _clean_dict(output) + output = DataFrame(**dec) + else: + output = DataFrame(output) + + # Corrections to enable DataFrame comparison. 
+ if orient == "values": + df.columns = [0, 1, 2] + df.index = [0, 1] + elif orient == "records": + df.index = [0, 1] + elif orient == "index": + df = df.transpose() + + assert (df.dtypes == dtype).all() + tm.assert_frame_equal(output, df) + + def test_dataframe_nested(self, orient): + df = DataFrame( + [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"] + ) + + nested = {"df1": df, "df2": df.copy()} + kwargs = {} if orient is None else {"orient": orient} + + exp = { + "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)), + "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)), + } + assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp + + def test_series(self, orient): + dtype = np.int64 + s = Series( + [10, 20, 30, 40, 50, 60], + name="series", + index=[6, 7, 8, 9, 10, 15], + dtype=dtype, + ).sort_values() + assert s.dtype == dtype + + encode_kwargs = {} if orient is None else {"orient": orient} + + output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs)) + assert s.dtype == dtype + + if orient == "split": + dec = _clean_dict(output) + output = Series(**dec) + else: + output = Series(output) + + if orient in (None, "index"): + s.name = None + output = output.sort_values() + s.index = ["6", "7", "8", "9", "10", "15"] + elif orient in ("records", "values"): + s.name = None + s.index = [0, 1, 2, 3, 4, 5] + + assert s.dtype == dtype + tm.assert_series_equal(output, s) + + def test_series_nested(self, orient): + s = Series( + [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15] + ).sort_values() + nested = {"s1": s, "s2": s.copy()} + kwargs = {} if orient is None else {"orient": orient} + + exp = { + "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)), + "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)), + } + assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp + + def test_index(self): + i = Index([23, 45, 18, 98, 43, 11], name="index") + + # Column indexed. + output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index") + tm.assert_index_equal(i, output) + + dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split"))) + output = Index(**dec) + + tm.assert_index_equal(i, output) + assert i.name == output.name + + tm.assert_index_equal(i, output) + assert i.name == output.name + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index" + ) + tm.assert_index_equal(i, output) + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index" + ) + tm.assert_index_equal(i, output) + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index" + ) + tm.assert_index_equal(i, output) + + def test_datetime_index(self): + date_unit = "ns" + + # freq doesn't round-trip + rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None) + encoded = ujson.ujson_dumps(rng, date_unit=date_unit) + + decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded))) + tm.assert_index_equal(rng, decoded) + + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit))) + + idx_values = decoded.index.values.astype(np.int64) + decoded.index = DatetimeIndex(idx_values) + tm.assert_series_equal(ts, decoded) + + @pytest.mark.parametrize( + "invalid_arr", + [ + "[31337,]", # Trailing comma. + "[,31337]", # Leading comma. + "[]]", # Unmatched bracket. + "[,]", # Only comma. 
+ ], + ) + def test_decode_invalid_array(self, invalid_arr): + msg = ( + "Expected object or value|Trailing data|" + "Unexpected character found when decoding array value" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(invalid_arr) + + @pytest.mark.parametrize("arr", [[], [31337]]) + def test_decode_array(self, arr): + assert arr == ujson.ujson_loads(str(arr)) + + @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808]) + def test_decode_extreme_numbers(self, extreme_num): + assert extreme_num == ujson.ujson_loads(str(extreme_num)) + + @pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"]) + def test_decode_too_extreme_numbers(self, too_extreme_num): + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + ujson.ujson_loads(too_extreme_num) + + def test_decode_with_trailing_whitespaces(self): + assert {} == ujson.ujson_loads("{}\n\t ") + + def test_decode_with_trailing_non_whitespaces(self): + with pytest.raises(ValueError, match="Trailing data"): + ujson.ujson_loads("{}\n\t a") + + @pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"]) + def test_decode_array_with_big_int(self, value): + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + ujson.ujson_loads(value) + + @pytest.mark.parametrize( + "float_number", + [ + 1.1234567893, + 1.234567893, + 1.34567893, + 1.4567893, + 1.567893, + 1.67893, + 1.7893, + 1.893, + 1.3, + ], + ) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_decode_floating_point(self, sign, float_number): + float_number *= sign + tm.assert_almost_equal( + float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15 + ) + + def test_encode_big_set(self): + s = set() + + for x in range(100000): + s.add(x) + + # Make sure no Exception is raised. 
+ ujson.ujson_dumps(s) + + def test_encode_empty_set(self): + assert "[]" == ujson.ujson_dumps(set()) + + def test_encode_set(self): + s = {1, 2, 3, 4, 5, 6, 7, 8, 9} + enc = ujson.ujson_dumps(s) + dec = ujson.ujson_loads(enc) + + for v in dec: + assert v in s + + @pytest.mark.parametrize( + "td", + [ + Timedelta(days=366), + Timedelta(days=-1), + Timedelta(hours=13, minutes=5, seconds=5), + Timedelta(hours=13, minutes=20, seconds=30), + Timedelta(days=-1, nanoseconds=5), + Timedelta(nanoseconds=1), + Timedelta(microseconds=1, nanoseconds=1), + Timedelta(milliseconds=1, microseconds=1, nanoseconds=1), + Timedelta(milliseconds=999, microseconds=999, nanoseconds=999), + ], + ) + def test_encode_timedelta_iso(self, td): + # GH 28256 + result = ujson.ujson_dumps(td, iso_dates=True) + expected = f'"{td.isoformat()}"' + + assert result == expected + + def test_encode_periodindex(self): + # GH 46683 + p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D") + df = DataFrame(index=p) + assert df.to_json() == "{}" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f518dac4092c46924366f0e16ef524707a383b65 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a318675f86f949cfe6283ffe3f5df4762f551f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbda3ecb073546c92dafc9b8d1696e133ae96601 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e84d35965e2a9bf62c39c36c96a7cad5e4482f58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8269665bcf00ce58d7eb081453307670ec0a19 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..980c20178f591407512b0ab9fdac7a5db5535d5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..742ca0206bec7781c797945e07d9d1ad6c757aab Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..876045aa76641c406938f90b39e8aefb028d5e44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db7aeffc902cf41221e3e47af33d8768610cd0ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed1b36526f14d5259e54eed8592b1a510c05b3c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3e891a9099a4ae08a249367ccecda17a05fd14a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9be339cc0627b11c53ae26567b6f7b8190f0796 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c495fcd617f1782e298cb0bc4f99ebcbe068df61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c78bdfc451b9b85e983d2e3abe8f1d622e4a35d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5c2ccb35407a45f7cf06c15273ddda579dc80b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e3c8638061d77ff1242af8cf12af21548d91afd Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..922bc29b7c8f8b2f4c4674ca70cbfd5092a16670 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e73bca89fdac36efc69894f679ad9133eaf7d09c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb04b17743948582adcf9f8f57f0a3ac6ee85141 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffa57013f824ad7b84f03935bf281f7003ef290d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a520d762d84519bcca9789cb9859b0baea3d8f0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63bdc93fadfd3d080a3a5237b657a69a21986c31 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa2a9164dd262dd05e38a37eba2cdfdb1a3b805a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461352b07c16780edca6127718a7c112b744db0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78cd5dae882a22c88157c0d3069ef3dc19637597 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7f2f05d1b00b1855dd040bf08cebabf73f9b9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py @@ -0,0 +1,55 @@ +from hypothesis import ( + assume, + example, + given, + strategies as st, +) +import numpy as np +import pytest + +from pandas._libs.byteswap import ( + read_double_with_byteswap, + read_float_with_byteswap, + read_uint16_with_byteswap, + read_uint32_with_byteswap, + read_uint64_with_byteswap, +) + +import pandas._testing as tm + + +@given(read_offset=st.integers(0, 11), number=st.integers(min_value=0)) +@example(number=2**16, read_offset=0) +@example(number=2**32, read_offset=0) +@example(number=2**64, read_offset=0) +@pytest.mark.parametrize("int_type", [np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("should_byteswap", [True, False]) +def test_int_byteswap(read_offset, number, int_type, should_byteswap): + assume(number < 2 ** (8 * int_type(0).itemsize)) + _test(number, int_type, read_offset, should_byteswap) + + +@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning") +@given(read_offset=st.integers(0, 11), number=st.floats()) 
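+# Note: the hypothesis @given decorator composes with the pytest.mark.parametrize
+# decorators below, so every drawn (read_offset, number) example runs once per
+# (float_type, should_byteswap) combination. The filterwarnings decorator above
+# is needed because casting an arbitrary hypothesis float to np.float32 can
+# overflow to inf, which numpy reports with a RuntimeWarning.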
+@pytest.mark.parametrize("float_type", [np.float32, np.float64]) +@pytest.mark.parametrize("should_byteswap", [True, False]) +def test_float_byteswap(read_offset, number, float_type, should_byteswap): + _test(number, float_type, read_offset, should_byteswap) + + +def _test(number, number_type, read_offset, should_byteswap): + number = number_type(number) + data = np.random.default_rng(2).integers(0, 256, size=20, dtype="uint8") + data[read_offset : read_offset + number.itemsize] = number[None].view("uint8") + swap_func = { + np.float32: read_float_with_byteswap, + np.float64: read_double_with_byteswap, + np.uint16: read_uint16_with_byteswap, + np.uint32: read_uint32_with_byteswap, + np.uint64: read_uint64_with_byteswap, + }[type(number)] + output_number = number_type(swap_func(bytes(data), read_offset, should_byteswap)) + if should_byteswap: + tm.assert_equal(output_number, number.byteswap()) + else: + tm.assert_equal(output_number, number) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py new file mode 100644 index 0000000000000000000000000000000000000000..1e38baf4fc4093879b850f03d746ae5e67b477ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py @@ -0,0 +1,34 @@ +from io import StringIO + +import pytest + +from pandas import read_sas +import pandas._testing as tm + + +class TestSas: + def test_sas_buffer_format(self): + # see gh-14947 + b = StringIO("") + + msg = ( + "If this is a buffer object rather than a string " + "name, you must specify a format string" + ) + with pytest.raises(ValueError, match=msg): + read_sas(b) + + def test_sas_read_no_format_or_extension(self): + # see gh-24548 + msg = "unable to infer format of SAS file.+" + with tm.ensure_clean("test_file_no_extension") as path: + with pytest.raises(ValueError, match=msg): + read_sas(path) + + +def test_sas_archive(datapath): + fname_uncompressed = datapath("io", "sas", "data", "airline.sas7bdat") + df_uncompressed = read_sas(fname_uncompressed) + fname_compressed = datapath("io", "sas", "data", "airline.sas7bdat.gz") + df_compressed = read_sas(fname_compressed, format="sas7bdat") + tm.assert_frame_equal(df_uncompressed, df_compressed) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py new file mode 100644 index 0000000000000000000000000000000000000000..b71896c77ffb5872beb98d3914e6195b69855703 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py @@ -0,0 +1,419 @@ +import contextlib +from datetime import datetime +import io +import os +from pathlib import Path + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.errors import EmptyDataError +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + +from pandas.io.sas.sas7bdat import SAS7BDATReader + + +@pytest.fixture +def dirpath(datapath): + return datapath("io", "sas", "data") + + +@pytest.fixture(params=[(1, range(1, 16)), (2, [16])]) +def data_test_ix(request, dirpath): + i, test_ix = request.param + fname = os.path.join(dirpath, f"test_sas7bdat_{i}.csv") + df = pd.read_csv(fname) + epoch = datetime(1960, 1, 1) + t1 = pd.to_timedelta(df["Column4"], unit="d") + df["Column4"] = (epoch + t1).astype("M8[s]") + t2 = pd.to_timedelta(df["Column12"], unit="d") + df["Column12"] = (epoch + t2).astype("M8[s]") + for k in range(df.shape[1]): 
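+        # sas7bdat stores numerics as floating point, so any int64 column
+        # read from the comparison CSV is upcast to float64 below before
+        # the frames are compared.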
+ col = df.iloc[:, k] + if col.dtype == np.int64: + df.isetitem(k, df.iloc[:, k].astype(np.float64)) + return df, test_ix + + +# https://github.com/cython/cython/issues/1720 +class TestSAS7BDAT: + @pytest.mark.slow + def test_from_file(self, dirpath, data_test_ix): + expected, test_ix = data_test_ix + for k in test_ix: + fname = os.path.join(dirpath, f"test{k}.sas7bdat") + df = pd.read_sas(fname, encoding="utf-8") + tm.assert_frame_equal(df, expected) + + @pytest.mark.slow + def test_from_buffer(self, dirpath, data_test_ix): + expected, test_ix = data_test_ix + for k in test_ix: + fname = os.path.join(dirpath, f"test{k}.sas7bdat") + with open(fname, "rb") as f: + byts = f.read() + buf = io.BytesIO(byts) + with pd.read_sas( + buf, format="sas7bdat", iterator=True, encoding="utf-8" + ) as rdr: + df = rdr.read() + tm.assert_frame_equal(df, expected) + + @pytest.mark.slow + def test_from_iterator(self, dirpath, data_test_ix): + expected, test_ix = data_test_ix + for k in test_ix: + fname = os.path.join(dirpath, f"test{k}.sas7bdat") + with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: + df = rdr.read(2) + tm.assert_frame_equal(df, expected.iloc[0:2, :]) + df = rdr.read(3) + tm.assert_frame_equal(df, expected.iloc[2:5, :]) + + @pytest.mark.slow + def test_path_pathlib(self, dirpath, data_test_ix): + expected, test_ix = data_test_ix + for k in test_ix: + fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat")) + df = pd.read_sas(fname, encoding="utf-8") + tm.assert_frame_equal(df, expected) + + @td.skip_if_no("py.path") + @pytest.mark.slow + def test_path_localpath(self, dirpath, data_test_ix): + from py.path import local as LocalPath + + expected, test_ix = data_test_ix + for k in test_ix: + fname = LocalPath(os.path.join(dirpath, f"test{k}.sas7bdat")) + df = pd.read_sas(fname, encoding="utf-8") + tm.assert_frame_equal(df, expected) + + @pytest.mark.slow + @pytest.mark.parametrize("chunksize", (3, 5, 10, 11)) + @pytest.mark.parametrize("k", range(1, 17)) + def test_iterator_loop(self, dirpath, k, chunksize): + # github #13654 + fname = os.path.join(dirpath, f"test{k}.sas7bdat") + with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr: + y = 0 + for x in rdr: + y += x.shape[0] + assert y == rdr.row_count + + def test_iterator_read_too_much(self, dirpath): + # github #14734 + fname = os.path.join(dirpath, "test1.sas7bdat") + with pd.read_sas( + fname, format="sas7bdat", iterator=True, encoding="utf-8" + ) as rdr: + d1 = rdr.read(rdr.row_count + 20) + + with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: + d2 = rdr.read(rdr.row_count + 20) + tm.assert_frame_equal(d1, d2) + + +def test_encoding_options(datapath): + fname = datapath("io", "sas", "data", "test1.sas7bdat") + df1 = pd.read_sas(fname) + df2 = pd.read_sas(fname, encoding="utf-8") + for col in df1.columns: + try: + df1[col] = df1[col].str.decode("utf-8") + except AttributeError: + pass + tm.assert_frame_equal(df1, df2) + + with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr: + df3 = rdr.read() + for x, y in zip(df1.columns, df3.columns): + assert x == y.decode() + + +def test_encoding_infer(datapath): + fname = datapath("io", "sas", "data", "test1.sas7bdat") + + with pd.read_sas(fname, encoding="infer", iterator=True) as df1_reader: + # check: is encoding inferred correctly from file + assert df1_reader.inferred_encoding == "cp1252" + df1 = df1_reader.read() + + with pd.read_sas(fname, encoding="cp1252", iterator=True) as df2_reader: + df2 = df2_reader.read() + + # 
check: reader reads correct information + tm.assert_frame_equal(df1, df2) + + +def test_productsales(datapath): + fname = datapath("io", "sas", "data", "productsales.sas7bdat") + df = pd.read_sas(fname, encoding="utf-8") + fname = datapath("io", "sas", "data", "productsales.csv") + df0 = pd.read_csv(fname, parse_dates=["MONTH"]) + vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] + df0[vn] = df0[vn].astype(np.float64) + + df0["MONTH"] = df0["MONTH"].astype("M8[s]") + tm.assert_frame_equal(df, df0) + + +def test_12659(datapath): + fname = datapath("io", "sas", "data", "test_12659.sas7bdat") + df = pd.read_sas(fname) + fname = datapath("io", "sas", "data", "test_12659.csv") + df0 = pd.read_csv(fname) + df0 = df0.astype(np.float64) + tm.assert_frame_equal(df, df0) + + +def test_airline(datapath): + fname = datapath("io", "sas", "data", "airline.sas7bdat") + df = pd.read_sas(fname) + fname = datapath("io", "sas", "data", "airline.csv") + df0 = pd.read_csv(fname) + df0 = df0.astype(np.float64) + tm.assert_frame_equal(df, df0) + + +def test_date_time(datapath): + # Support of different SAS date/datetime formats (PR #15871) + fname = datapath("io", "sas", "data", "datetime.sas7bdat") + df = pd.read_sas(fname) + fname = datapath("io", "sas", "data", "datetime.csv") + df0 = pd.read_csv( + fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"] + ) + # GH 19732: Timestamps imported from sas will incur floating point errors + # See GH#56014 for discussion of the correct "expected" results + # We are really just testing that we are "close". This only seems to be + # an issue near the implementation bounds. + + df[df.columns[3]] = df.iloc[:, 3].dt.round("us") + df0["Date1"] = df0["Date1"].astype("M8[s]") + df0["Date2"] = df0["Date2"].astype("M8[s]") + df0["DateTime"] = df0["DateTime"].astype("M8[ms]") + df0["Taiw"] = df0["Taiw"].astype("M8[s]") + + res = df0["DateTimeHi"].astype("M8[us]").dt.round("ms") + df0["DateTimeHi"] = res.astype("M8[ms]") + + if not IS64: + # No good reason for this, just what we get on the CI + df0.loc[0, "DateTimeHi"] += np.timedelta64(1, "ms") + df0.loc[[2, 3], "DateTimeHi"] -= np.timedelta64(1, "ms") + tm.assert_frame_equal(df, df0) + + +@pytest.mark.parametrize("column", ["WGT", "CYL"]) +def test_compact_numerical_values(datapath, column): + # Regression test for #21616 + fname = datapath("io", "sas", "data", "cars.sas7bdat") + df = pd.read_sas(fname, encoding="latin-1") + # The two columns CYL and WGT in cars.sas7bdat have column + # width < 8 and only contain integral values. + # Test that pandas doesn't corrupt the numbers by adding + # decimals. + result = df[column] + expected = df[column].round() + tm.assert_series_equal(result, expected, check_exact=True) + + +def test_many_columns(datapath): + # Test for looking for column information in more places (PR #22628) + fname = datapath("io", "sas", "data", "many_columns.sas7bdat") + + df = pd.read_sas(fname, encoding="latin-1") + + fname = datapath("io", "sas", "data", "many_columns.csv") + df0 = pd.read_csv(fname, encoding="latin-1") + tm.assert_frame_equal(df, df0) + + +def test_inconsistent_number_of_rows(datapath): + # Regression test for issue #16615. 
(PR #22628) + fname = datapath("io", "sas", "data", "load_log.sas7bdat") + df = pd.read_sas(fname, encoding="latin-1") + assert len(df) == 2097 + + +def test_zero_variables(datapath): + # Check if the SAS file has zero variables (PR #18184) + fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") + with pytest.raises(EmptyDataError, match="No columns to parse from file"): + pd.read_sas(fname) + + +def test_zero_rows(datapath): + # GH 18198 + fname = datapath("io", "sas", "data", "zero_rows.sas7bdat") + result = pd.read_sas(fname) + expected = pd.DataFrame([{"char_field": "a", "num_field": 1.0}]).iloc[:0] + tm.assert_frame_equal(result, expected) + + +def test_corrupt_read(datapath): + # We don't really care about the exact failure, the important thing is + # that the resource should be cleaned up afterwards (BUG #35566) + fname = datapath("io", "sas", "data", "corrupt.sas7bdat") + msg = "'SAS7BDATReader' object has no attribute 'row_count'" + with pytest.raises(AttributeError, match=msg): + pd.read_sas(fname) + + +def test_max_sas_date(datapath): + # GH 20927 + # NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999 + # but this is read as 29DEC9999:23:59:59.998993 by a buggy + # sas7bdat module + # See also GH#56014 for discussion of the correct "expected" results. + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + df = pd.read_sas(fname, encoding="iso-8859-1") + + expected = pd.DataFrame( + { + "text": ["max", "normal"], + "dt_as_float": [253717747199.999, 1880323199.999], + "dt_as_dt": np.array( + [ + datetime(9999, 12, 29, 23, 59, 59, 999000), + datetime(2019, 8, 1, 23, 59, 59, 999000), + ], + dtype="M8[ms]", + ), + "date_as_float": [2936547.0, 21762.0], + "date_as_date": np.array( + [ + datetime(9999, 12, 29), + datetime(2019, 8, 1), + ], + dtype="M8[s]", + ), + }, + columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"], + ) + + if not IS64: + # No good reason for this, just what we get on the CI + expected.loc[:, "dt_as_dt"] -= np.timedelta64(1, "ms") + + tm.assert_frame_equal(df, expected) + + +def test_max_sas_date_iterator(datapath): + # GH 20927 + # when called as an iterator, only those chunks with a date > pd.Timestamp.max + # are returned as datetime.datetime, if this happens that whole chunk is returned + # as datetime.datetime + col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"] + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + results = [] + for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1): + # GH 19732: Timestamps imported from sas will incur floating point errors + df.reset_index(inplace=True, drop=True) + results.append(df) + expected = [ + pd.DataFrame( + { + "text": ["max"], + "dt_as_float": [253717747199.999], + "dt_as_dt": np.array( + [datetime(9999, 12, 29, 23, 59, 59, 999000)], dtype="M8[ms]" + ), + "date_as_float": [2936547.0], + "date_as_date": np.array([datetime(9999, 12, 29)], dtype="M8[s]"), + }, + columns=col_order, + ), + pd.DataFrame( + { + "text": ["normal"], + "dt_as_float": [1880323199.999], + "dt_as_dt": np.array(["2019-08-01 23:59:59.999"], dtype="M8[ms]"), + "date_as_float": [21762.0], + "date_as_date": np.array(["2019-08-01"], dtype="M8[s]"), + }, + columns=col_order, + ), + ] + if not IS64: + # No good reason for this, just what we get on the CI + expected[0].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms") + expected[1].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms") + + tm.assert_frame_equal(results[0], expected[0]) + 
tm.assert_frame_equal(results[1], expected[1]) + + +def test_null_date(datapath): + fname = datapath("io", "sas", "data", "dates_null.sas7bdat") + df = pd.read_sas(fname, encoding="utf-8") + + expected = pd.DataFrame( + { + "datecol": np.array( + [ + datetime(9999, 12, 29), + np.datetime64("NaT"), + ], + dtype="M8[s]", + ), + "datetimecol": np.array( + [ + datetime(9999, 12, 29, 23, 59, 59, 999000), + np.datetime64("NaT"), + ], + dtype="M8[ms]", + ), + }, + ) + if not IS64: + # No good reason for this, just what we get on the CI + expected.loc[0, "datetimecol"] -= np.timedelta64(1, "ms") + tm.assert_frame_equal(df, expected) + + +def test_meta2_page(datapath): + # GH 35545 + fname = datapath("io", "sas", "data", "test_meta2_page.sas7bdat") + df = pd.read_sas(fname) + assert len(df) == 1000 + + +@pytest.mark.parametrize( + "test_file, override_offset, override_value, expected_msg", + [ + ("test2.sas7bdat", 0x10000 + 55229, 0x80 | 0x0F, "Out of bounds"), + ("test2.sas7bdat", 0x10000 + 55229, 0x10, "unknown control byte"), + ("test3.sas7bdat", 118170, 184, "Out of bounds"), + ], +) +def test_rle_rdc_exceptions( + datapath, test_file, override_offset, override_value, expected_msg +): + """Errors in RLE/RDC decompression should propagate.""" + with open(datapath("io", "sas", "data", test_file), "rb") as fd: + data = bytearray(fd.read()) + data[override_offset] = override_value + with pytest.raises(Exception, match=expected_msg): + pd.read_sas(io.BytesIO(data), format="sas7bdat") + + +def test_0x40_control_byte(datapath): + # GH 31243 + fname = datapath("io", "sas", "data", "0x40controlbyte.sas7bdat") + df = pd.read_sas(fname, encoding="ascii") + fname = datapath("io", "sas", "data", "0x40controlbyte.csv") + df0 = pd.read_csv(fname, dtype="object") + tm.assert_frame_equal(df, df0) + + +def test_0x00_control_byte(datapath): + # GH 47099 + fname = datapath("io", "sas", "data", "0x00controlbyte.sas7bdat.bz2") + df = next(pd.read_sas(fname, chunksize=11_000)) + assert df.shape == (11_000, 20) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py new file mode 100644 index 0000000000000000000000000000000000000000..766c9c37d55b9ee3fcb1d206b9ed100aaaf1d610 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py @@ -0,0 +1,167 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +from pandas.io.sas.sasreader import read_sas + +# CSV versions of test xpt files were obtained using the R foreign library + +# Numbers in a SAS xport file are always float64, so need to convert +# before making comparisons. 
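+# The helper below performs that upcast in place on the CSV comparison
+# frames: any int64 column becomes float64 so it compares equal to what
+# read_sas returns.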
+ + +def numeric_as_float(data): + for v in data.columns: + if data[v].dtype is np.dtype("int64"): + data[v] = data[v].astype(np.float64) + + +class TestXport: + @pytest.fixture + def file01(self, datapath): + return datapath("io", "sas", "data", "DEMO_G.xpt") + + @pytest.fixture + def file02(self, datapath): + return datapath("io", "sas", "data", "SSHSV1_A.xpt") + + @pytest.fixture + def file03(self, datapath): + return datapath("io", "sas", "data", "DRXFCD_G.xpt") + + @pytest.fixture + def file04(self, datapath): + return datapath("io", "sas", "data", "paxraw_d_short.xpt") + + @pytest.fixture + def file05(self, datapath): + return datapath("io", "sas", "data", "DEMO_PUF.cpt") + + @pytest.mark.slow + def test1_basic(self, file01): + # Tests with DEMO_G.xpt (all numeric file) + + # Compare to this + data_csv = pd.read_csv(file01.replace(".xpt", ".csv")) + numeric_as_float(data_csv) + + # Read full file + data = read_sas(file01, format="xport") + tm.assert_frame_equal(data, data_csv) + num_rows = data.shape[0] + + # Test reading beyond end of file + with read_sas(file01, format="xport", iterator=True) as reader: + data = reader.read(num_rows + 100) + assert data.shape[0] == num_rows + + # Test incremental read with `read` method. + with read_sas(file01, format="xport", iterator=True) as reader: + data = reader.read(10) + tm.assert_frame_equal(data, data_csv.iloc[0:10, :]) + + # Test incremental read with `get_chunk` method. + with read_sas(file01, format="xport", chunksize=10) as reader: + data = reader.get_chunk() + tm.assert_frame_equal(data, data_csv.iloc[0:10, :]) + + # Test read in loop + m = 0 + with read_sas(file01, format="xport", chunksize=100) as reader: + for x in reader: + m += x.shape[0] + assert m == num_rows + + # Read full file with `read_sas` method + data = read_sas(file01) + tm.assert_frame_equal(data, data_csv) + + def test1_index(self, file01): + # Tests with DEMO_G.xpt using index (all numeric file) + + # Compare to this + data_csv = pd.read_csv(file01.replace(".xpt", ".csv")) + data_csv = data_csv.set_index("SEQN") + numeric_as_float(data_csv) + + # Read full file + data = read_sas(file01, index="SEQN", format="xport") + tm.assert_frame_equal(data, data_csv, check_index_type=False) + + # Test incremental read with `read` method. + with read_sas(file01, index="SEQN", format="xport", iterator=True) as reader: + data = reader.read(10) + tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False) + + # Test incremental read with `get_chunk` method. 
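+        # (get_chunk() with no size argument returns the next `chunksize`
+        # rows, 10 here.)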
+ with read_sas(file01, index="SEQN", format="xport", chunksize=10) as reader: + data = reader.get_chunk() + tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False) + + def test1_incremental(self, file01): + # Test with DEMO_G.xpt, reading full file incrementally + + data_csv = pd.read_csv(file01.replace(".xpt", ".csv")) + data_csv = data_csv.set_index("SEQN") + numeric_as_float(data_csv) + + with read_sas(file01, index="SEQN", chunksize=1000) as reader: + all_data = list(reader) + data = pd.concat(all_data, axis=0) + + tm.assert_frame_equal(data, data_csv, check_index_type=False) + + def test2(self, file02): + # Test with SSHSV1_A.xpt + + # Compare to this + data_csv = pd.read_csv(file02.replace(".xpt", ".csv")) + numeric_as_float(data_csv) + + data = read_sas(file02) + tm.assert_frame_equal(data, data_csv) + + def test2_binary(self, file02): + # Test with SSHSV1_A.xpt, read as a binary file + + # Compare to this + data_csv = pd.read_csv(file02.replace(".xpt", ".csv")) + numeric_as_float(data_csv) + + with open(file02, "rb") as fd: + # GH#35693 ensure that if we pass an open file, we + # dont incorrectly close it in read_sas + data = read_sas(fd, format="xport") + + tm.assert_frame_equal(data, data_csv) + + def test_multiple_types(self, file03): + # Test with DRXFCD_G.xpt (contains text and numeric variables) + + # Compare to this + data_csv = pd.read_csv(file03.replace(".xpt", ".csv")) + + data = read_sas(file03, encoding="utf-8") + tm.assert_frame_equal(data, data_csv) + + def test_truncated_float_support(self, file04): + # Test with paxraw_d_short.xpt, a shortened version of: + # http://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/PAXRAW_D.ZIP + # This file has truncated floats (5 bytes in this case). + + # GH 11713 + + data_csv = pd.read_csv(file04.replace(".xpt", ".csv")) + + data = read_sas(file04, format="xport") + tm.assert_frame_equal(data.astype("int64"), data_csv) + + def test_cport_header_found_raises(self, file05): + # Test with DEMO_PUF.cpt, the beginning of puf2019_1_fall.xpt + # from https://www.cms.gov/files/zip/puf2019.zip + # (despite the extension, it's a cpt file) + msg = "Header record indicates a CPORT file, which is not readable." 
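+        # A CPORT file is SAS's compressed transport format, distinct from
+        # XPORT; the reader recognizes its header record and raises rather
+        # than silently mis-parsing the stream.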
+ with pytest.raises(ValueError, match=msg): + read_sas(file05, format="xport") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63da9688c555fbfdcfaf0ddd191f0680580e6bbc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23eb8015ab70c0029d4b852442c919fddd25e806 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e9c9babea59ff55e07eb0234776e16a8cfc21a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca42306104e6f49d135a2e3ee84181c3515d27c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1569b6a0a1329e8bc98a6da2b10c2478ed77dc63 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..aafda0ff62bbdf94331fb7cb8fe5d51b6eb1d63a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py @@ -0,0 +1,38 @@ +from pathlib import Path + +import pytest + + +@pytest.fixture +def xml_data_path(): + return Path(__file__).parent.parent / "data" / "xml" + + +@pytest.fixture +def xml_books(xml_data_path, datapath): + return datapath(xml_data_path / "books.xml") + + +@pytest.fixture +def xml_doc_ch_utf(xml_data_path, datapath): + return datapath(xml_data_path / "doc_ch_utf.xml") + + +@pytest.fixture +def xml_baby_names(xml_data_path, datapath): + return datapath(xml_data_path / "baby_names.xml") + + +@pytest.fixture +def kml_cta_rail_lines(xml_data_path, datapath): + return datapath(xml_data_path / "cta_rail_lines.kml") + + +@pytest.fixture +def 
xsl_flatten_doc(xml_data_path, datapath): + return datapath(xml_data_path / "flatten_doc.xsl") + + +@pytest.fixture +def xsl_row_field_output(xml_data_path, datapath): + return datapath(xml_data_path / "row_field_output.xsl") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..37251a58b0c119ef1da15c259e9e77a456b86ac9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py @@ -0,0 +1,1375 @@ +from __future__ import annotations + +from io import ( + BytesIO, + StringIO, +) +import os + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + NA, + DataFrame, + Index, +) +import pandas._testing as tm + +from pandas.io.common import get_handle +from pandas.io.xml import read_xml + +# CHECKLIST + +# [x] - ValueError: "Values for parser can only be lxml or etree." + +# etree +# [x] - ImportError: "lxml not found, please install or use the etree parser." +# [X] - TypeError: "...is not a valid type for attr_cols" +# [X] - TypeError: "...is not a valid type for elem_cols" +# [X] - LookupError: "unknown encoding" +# [X] - KeyError: "...is not included in namespaces" +# [X] - KeyError: "no valid column" +# [X] - ValueError: "To use stylesheet, you need lxml installed..." +# [] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +# [X] - FileNotFoundError: "No such file or directory" +# [X] - PermissionError: "Forbidden" + +# lxml +# [X] - TypeError: "...is not a valid type for attr_cols" +# [X] - TypeError: "...is not a valid type for elem_cols" +# [X] - LookupError: "unknown encoding" +# [] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +# [X] - FileNotFoundError: "No such file or directory" +# [X] - KeyError: "...is not included in namespaces" +# [X] - KeyError: "no valid column" +# [X] - ValueError: "stylesheet is not a url, file, or xml string." +# [] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT) +# [] - URLError: (USUALLY DUE TO NETWORKING) +# [] - HTTPError: (NEED AN ONLINE STYLESHEET) +# [X] - OSError: "failed to load external entity" +# [X] - XMLSyntaxError: "Opening and ending tag mismatch" +# [X] - XSLTApplyError: "Cannot resolve URI" +# [X] - XSLTParseError: "failed to compile" +# [X] - PermissionError: "Forbidden" + + +@pytest.fixture +def geom_df(): + return DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } + ) + + +@pytest.fixture +def planet_df(): + return DataFrame( + { + "planet": [ + "Mercury", + "Venus", + "Earth", + "Mars", + "Jupiter", + "Saturn", + "Uranus", + "Neptune", + ], + "type": [ + "terrestrial", + "terrestrial", + "terrestrial", + "terrestrial", + "gas giant", + "gas giant", + "ice giant", + "ice giant", + ], + "location": [ + "inner", + "inner", + "inner", + "inner", + "outer", + "outer", + "outer", + "outer", + ], + "mass": [ + 0.330114, + 4.86747, + 5.97237, + 0.641712, + 1898.187, + 568.3174, + 86.8127, + 102.4126, + ], + } + ) + + +@pytest.fixture +def from_file_expected(): + return """\ + + + + 0 + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + 1 + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + 2 + web + Learning XML + Erik T. 
Ray + 2003 + 39.95 + +""" + + +def equalize_decl(doc): + # etree and lxml differ on quotes and case in xml declaration + if doc is not None: + doc = doc.replace( + ' + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + df_file = read_xml(xml_books, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml(path, index=False, parser=parser) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +def test_index_false_rename_row_root(xml_books, parser): + expected = """\ + + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + df_file = read_xml(xml_books, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml( + path, index=False, root_name="books", row_name="book", parser=parser + ) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +@pytest.mark.parametrize( + "offset_index", [list(range(10, 13)), [str(i) for i in range(10, 13)]] +) +def test_index_false_with_offset_input_index(parser, offset_index, geom_df): + """ + Tests that the output does not contain the `` field when the index of the + input Dataframe has an offset. + + This is a regression test for issue #42458. + """ + + expected = """\ + + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + +""" + + offset_geom_df = geom_df.copy() + offset_geom_df.index = Index(offset_index) + output = offset_geom_df.to_xml(index=False, parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# NA_REP + +na_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_na_elem_output(parser, geom_df): + output = geom_df.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_str_elem_option(parser, geom_df): + output = geom_df.to_xml(na_rep="", parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_elem_option(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + 0.0 + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(na_rep="0.0", parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# ATTR_COLS + + +def test_attrs_cols_nan_output(parser, geom_df): + expected = """\ + + + + + +""" + + output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_attrs_cols_prefix(parser, geom_df): + expected = """\ + + + + + +""" + + output = geom_df.to_xml( + attr_cols=["index", "shape", "degrees", "sides"], + namespaces={"doc": "http://example.xom"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +def test_attrs_unknown_column(parser, geom_df): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser) + + +def test_attrs_wrong_type(parser, geom_df): + with pytest.raises(TypeError, match=("is not a valid type for attr_cols")): + 
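+        # attr_cols must be list-like; a single comma-separated string is
+        # rejected outright rather than treated as one column name.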
geom_df.to_xml(attr_cols='"shape", "degree", "sides"', parser=parser) + + +# ELEM_COLS + + +def test_elems_cols_nan_output(parser, geom_df): + elems_cols_expected = """\ + + + + 360 + 4.0 + square + + + 360 + + circle + + + 180 + 3.0 + triangle + +""" + + output = geom_df.to_xml( + index=False, elem_cols=["degrees", "sides", "shape"], parser=parser + ) + output = equalize_decl(output) + + assert output == elems_cols_expected + + +def test_elems_unknown_column(parser, geom_df): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser) + + +def test_elems_wrong_type(parser, geom_df): + with pytest.raises(TypeError, match=("is not a valid type for elem_cols")): + geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser) + + +def test_elems_and_attrs_cols(parser, geom_df): + elems_cols_expected = """\ + + + + 360 + 4.0 + + + 360 + + + + 180 + 3.0 + +""" + + output = geom_df.to_xml( + index=False, + elem_cols=["degrees", "sides"], + attr_cols=["shape"], + parser=parser, + ) + output = equalize_decl(output) + + assert output == elems_cols_expected + + +# HIERARCHICAL COLUMNS + + +def test_hierarchical_columns(parser, planet_df): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + + + All + + 8 + 2667.54 + 333.44 + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_hierarchical_attrs_columns(parser, planet_df): + expected = """\ + + + + + + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# MULTIINDEX + + +def test_multi_index(parser, planet_df): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + + output = agg.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_multi_index_attrs_cols(parser, planet_df): + expected = """\ + + + + + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# NAMESPACE + + +def test_default_namespace(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_unused_namespaces(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"oth": "http://other.org", "ex": "http://example.com"}, + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +# PREFIX + + +def 
test_namespace_prefix(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser + ) + output = equalize_decl(output) + + assert output == expected + + +def test_missing_prefix_in_nmsp(parser, geom_df): + with pytest.raises(KeyError, match=("doc is not included in namespaces")): + geom_df.to_xml( + namespaces={"": "http://example.com"}, prefix="doc", parser=parser + ) + + +def test_namespace_prefix_and_default(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"": "http://example.com", "doc": "http://other.org"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +# ENCODING + +encoding_expected = """\ + + + + 0 + 1 + José + Sofía + + + 1 + 2 + Luis + Valentina + + + 2 + 3 + Carlos + Isabella + + + 3 + 4 + Juan + Camila + + + 4 + 5 + Jorge + Valeria + +""" + + +def test_encoding_option_str(xml_baby_names, parser): + df_file = read_xml(xml_baby_names, parser=parser, encoding="ISO-8859-1").head(5) + + output = df_file.to_xml(encoding="ISO-8859-1", parser=parser) + + if output is not None: + # etree and lxml differ on quotes and case in xml declaration + output = output.replace( + ' + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(xml_declaration=False) + + assert output == expected + + +def test_no_pretty_print_with_decl(parser, geom_df): + expected = ( + "\n" + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(pretty_print=False, parser=parser) + output = equalize_decl(output) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") + + assert output == expected + + +def test_no_pretty_print_no_decl(parser, geom_df): + expected = ( + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") + + assert output == expected + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(geom_df): + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + geom_df.to_xml() + + +def test_unknown_parser(geom_df): + with pytest.raises( + ValueError, match=("Values for parser can only be lxml or etree.") + ): + geom_df.to_xml(parser="bs4") + + +# STYLESHEET + +xsl_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df): + pytest.importorskip("lxml") + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + assert geom_df.to_xml(stylesheet=f) == xsl_expected + + +def test_stylesheet_io(xsl_row_field_output, mode, geom_df): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + 
else: + xsl_obj = StringIO(f.read()) + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df): + pytest.importorskip("lxml") + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + xsl_obj = f.read() + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +def test_stylesheet_wrong_path(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = os.path.join("data", "xml", "row_field_output.xslt") + + with pytest.raises( + lxml_etree.XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + geom_df.to_xml(stylesheet=xsl) + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_stylesheet(val, geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + msg = "|".join( + [ + "Document is empty", + "Start tag expected, '<' not found", + # Seen on Mac with lxml 4.9.1 + r"None \(line 0\)", + ] + ) + + with pytest.raises(lxml_etree.XMLSyntaxError, match=msg): + geom_df.to_xml(stylesheet=val) + + +def test_incorrect_xsl_syntax(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises( + lxml_etree.XMLSyntaxError, match=("Opening and ending tag mismatch") + ): + geom_df.to_xml(stylesheet=xsl) + + +def test_incorrect_xsl_eval(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTParseError, match=("failed to compile")): + geom_df.to_xml(stylesheet=xsl) + + +def test_incorrect_xsl_apply(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTApplyError, match=("Cannot resolve URI")): + with tm.ensure_clean("test.xml") as path: + geom_df.to_xml(path, stylesheet=xsl) + + +def test_stylesheet_with_etree(geom_df): + xsl = """\ + + + + + + + + + """ + + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + geom_df.to_xml(parser="etree", stylesheet=xsl) + + +def test_style_to_csv(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + , + + ,shape,degrees,sides + + + + + + + +""" + + out_csv = geom_df.to_csv(lineterminator="\n") + + if out_csv is not None: + out_csv = out_csv.strip() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_csv == out_xml + + +def test_style_to_string(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + + + shape degrees sides + + + + + + + +""" + + out_str = geom_df.to_string() + out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl) + + assert out_xml == out_str + + +def test_style_to_json(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + " + + + {"shape":{ + + },"degrees":{ + + },"sides":{ + + }} + + + + + + + + + + + + + + + + + , + + +""" + + out_json = geom_df.to_json() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_json == out_xml + + +# COMPRESSION + + +geom_xml = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_compression_output(parser, compression_only, geom_df): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with get_handle( + path, + "r", + compression=compression_only, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + 
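+# An editorial sketch (not from the pandas suite) of the round trip that the
+# compression tests here exercise: `compression` defaults to "infer" on both
+# DataFrame.to_xml and read_xml, so a recognized suffix such as ".gz" selects
+# the codec on write and again on read. The helper name and its tmp_path
+# argument are assumptions for illustration only.
+def _compression_roundtrip_sketch(tmp_path):
+    df = DataFrame({"shape": ["square"], "sides": [4]})
+    path = tmp_path / "geom.xml.gz"
+    df.to_xml(path, parser="etree")  # gzip chosen from the ".gz" suffix
+    result = read_xml(path, parser="etree")  # decompressed transparently
+    # to_xml writes the index as an <index> element by default, so drop it
+    # before comparing against the original frame.
+    tm.assert_frame_equal(result.drop(columns="index"), df)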
+ +def test_filename_and_suffix_comp( + parser, compression_only, geom_df, compression_to_extension +): + compfile = "xml." + compression_to_extension[compression_only] + with tm.ensure_clean(filename=compfile) as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with get_handle( + path, + "r", + compression=compression_only, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + + +def test_ea_dtypes(any_numeric_ea_dtype, parser): + # GH#43903 + expected = """ + + + 0 + + +""" + df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype) + result = df.to_xml(parser=parser) + assert equalize_decl(result).strip() == expected + + +def test_unsuported_compression(parser, geom_df): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression="7z") + + +# STORAGE OPTIONS + + +@pytest.mark.single_cpu +def test_s3_permission_output(parser, s3_public_bucket, geom_df): + s3fs = pytest.importorskip("s3fs") + pytest.importorskip("lxml") + + with tm.external_error_raised((PermissionError, FileNotFoundError)): + fs = s3fs.S3FileSystem(anon=True) + fs.ls(s3_public_bucket.name) + + geom_df.to_xml( + f"s3://{s3_public_bucket.name}/geom.xml", compression="zip", parser=parser + ) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..6f429c1ecbf8aad459e78aba0b480299406483d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py @@ -0,0 +1,2097 @@ +from __future__ import annotations + +from io import ( + BytesIO, + StringIO, +) +from lzma import LZMAError +import os +from tarfile import ReadError +from urllib.error import HTTPError +from xml.etree.ElementTree import ParseError +from zipfile import BadZipFile + +import numpy as np +import pytest + +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + EmptyDataError, + ParserError, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + NA, + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) +from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + +from pandas.io.common import get_handle +from pandas.io.xml import read_xml + +# CHECK LIST + +# [x] - ValueError: "Values for parser can only be lxml or etree." + +# etree +# [X] - ImportError: "lxml not found, please install or use the etree parser." +# [X] - TypeError: "expected str, bytes or os.PathLike object, not NoneType" +# [X] - ValueError: "Either element or attributes can be parsed not both." +# [X] - ValueError: "xpath does not return any nodes..." +# [X] - SyntaxError: "You have used an incorrect or unsupported XPath" +# [X] - ValueError: "names does not match length of child elements in xpath." +# [X] - TypeError: "...is not a valid type for names" +# [X] - ValueError: "To use stylesheet, you need lxml installed..." 
+# [] - URLError: (GENERAL ERROR WITH HTTPError AS SUBCLASS) +# [X] - HTTPError: "HTTP Error 404: Not Found" +# [] - OSError: (GENERAL ERROR WITH FileNotFoundError AS SUBCLASS) +# [X] - FileNotFoundError: "No such file or directory" +# [] - ParseError (FAILSAFE CATCH ALL FOR VERY COMPLEX XML) +# [X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..." +# [X] - UnicodeError: "UTF-16 stream does not start with BOM" +# [X] - BadZipFile: "File is not a zip file" +# [X] - OSError: "Invalid data stream" +# [X] - LZMAError: "Input format not supported by decoder" +# [X] - ValueError: "Unrecognized compression type" +# [X] - PermissionError: "Forbidden" + +# lxml +# [X] - ValueError: "Either element or attributes can be parsed not both." +# [X] - AttributeError: "__enter__" +# [X] - XSLTApplyError: "Cannot resolve URI" +# [X] - XSLTParseError: "document is not a stylesheet" +# [X] - ValueError: "xpath does not return any nodes." +# [X] - XPathEvalError: "Invalid expression" +# [] - XPathSyntaxError: (OLD VERSION IN lxml FOR XPATH ERRORS) +# [X] - TypeError: "empty namespace prefix is not supported in XPath" +# [X] - ValueError: "names does not match length of child elements in xpath." +# [X] - TypeError: "...is not a valid type for names" +# [X] - LookupError: "unknown encoding" +# [] - URLError: (USUALLY DUE TO NETWORKING) +# [X - HTTPError: "HTTP Error 404: Not Found" +# [X] - OSError: "failed to load external entity" +# [X] - XMLSyntaxError: "Start tag expected, '<' not found" +# [] - ParserError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML +# [X] - ValueError: "Values for parser can only be lxml or etree." +# [X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..." +# [X] - UnicodeError: "UTF-16 stream does not start with BOM" +# [X] - BadZipFile: "File is not a zip file" +# [X] - OSError: "Invalid data stream" +# [X] - LZMAError: "Input format not supported by decoder" +# [X] - ValueError: "Unrecognized compression type" +# [X] - PermissionError: "Forbidden" + +geom_df = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } +) + +xml_default_nmsp = """\ + + + + square + 360 + 4 + + + circle + 360 + + + + triangle + 180 + 3 + +""" + +xml_prefix_nmsp = """\ + + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + +""" + + +df_kml = DataFrame( + { + "id": { + 0: "ID_00001", + 1: "ID_00002", + 2: "ID_00003", + 3: "ID_00004", + 4: "ID_00005", + }, + "name": { + 0: "Blue Line (Forest Park)", + 1: "Red, Purple Line", + 2: "Red, Purple Line", + 3: "Red, Purple Line", + 4: "Red, Purple Line", + }, + "styleUrl": { + 0: "#LineStyle01", + 1: "#LineStyle01", + 2: "#LineStyle01", + 3: "#LineStyle01", + 4: "#LineStyle01", + }, + "extrude": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, + "altitudeMode": { + 0: "clampedToGround", + 1: "clampedToGround", + 2: "clampedToGround", + 3: "clampedToGround", + 4: "clampedToGround", + }, + "coordinates": { + 0: ( + "-87.77678526964958,41.8708863930319,0 " + "-87.77826234150609,41.87097820122218,0 " + "-87.78251583439344,41.87130129991005,0 " + "-87.78418294588424,41.87145055520308,0 " + "-87.7872369165933,41.8717239119163,0 " + "-87.79160214925886,41.87210797280065,0" + ), + 1: ( + "-87.65758750947528,41.96427269188822,0 " + "-87.65802133507393,41.96581929055245,0 " + "-87.65819033925305,41.96621846093642,0 " + "-87.6583189819129,41.96650362897086,0 " + "-87.65835858701473,41.96669002089185,0 " + "-87.65838428411853,41.96688150295095,0 " + 
"-87.65842208882658,41.96745896091846,0 " + "-87.65846556843937,41.9683761425439,0 " + "-87.65849296214573,41.96913893870342,0" + ), + 2: ( + "-87.65492939166126,41.95377494531437,0 " + "-87.65557043199591,41.95376544118533,0 " + "-87.65606302030132,41.95376391658746,0 " + "-87.65623502146268,41.95377379126367,0 " + "-87.65634748981634,41.95380103566435,0 " + "-87.65646537904269,41.95387703994676,0 " + "-87.65656532461145,41.95396622645799,0 " + "-87.65664760856414,41.95404201996044,0 " + "-87.65671750555913,41.95416647054043,0 " + "-87.65673983607117,41.95429949810849,0 " + "-87.65673866475777,41.95441024240925,0 " + "-87.6567690255541,41.95490657227902,0 " + "-87.65683672482363,41.95692259283837,0 " + "-87.6568900886376,41.95861070983142,0 " + "-87.65699865558875,41.96181418669004,0 " + "-87.65756347177603,41.96397045777844,0 " + "-87.65758750947528,41.96427269188822,0" + ), + 3: ( + "-87.65362593118043,41.94742799535678,0 " + "-87.65363554415794,41.94819886386848,0 " + "-87.6536456393239,41.95059994675451,0 " + "-87.65365831235026,41.95108288489359,0 " + "-87.6536604873874,41.9519954657554,0 " + "-87.65362592053201,41.95245597302328,0 " + "-87.65367158496069,41.95311153649393,0 " + "-87.65368468595476,41.9533202828916,0 " + "-87.65369271253692,41.95343095587119,0 " + "-87.65373335834569,41.95351536301472,0 " + "-87.65378605844126,41.95358212680591,0 " + "-87.65385067928185,41.95364452823767,0 " + "-87.6539390793817,41.95370263886964,0 " + "-87.6540786298351,41.95373403675265,0 " + "-87.65430648647626,41.9537535411832,0 " + "-87.65492939166126,41.95377494531437,0" + ), + 4: ( + "-87.65345391792157,41.94217681262115,0 " + "-87.65342448305786,41.94237224420864,0 " + "-87.65339745703922,41.94268217746244,0 " + "-87.65337753982941,41.94288140770284,0 " + "-87.65336256753105,41.94317369618263,0 " + "-87.65338799707138,41.94357253961736,0 " + "-87.65340240886648,41.94389158188269,0 " + "-87.65341837392448,41.94406444407721,0 " + "-87.65342275247338,41.94421065714904,0 " + "-87.65347469646018,41.94434829382345,0 " + "-87.65351486483024,41.94447699917548,0 " + "-87.65353483605053,41.9453896864472,0 " + "-87.65361975532807,41.94689193720703,0 " + "-87.65362593118043,41.94742799535678,0" + ), + }, + } +) + + +def test_literal_xml_deprecation(): + # GH 53809 + pytest.importorskip("lxml") + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." 
+ ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + read_xml(xml_default_nmsp) + + +@pytest.fixture(params=["rb", "r"]) +def mode(request): + return request.param + + +@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"]) +def parser(request): + return request.param + + +def read_xml_iterparse(data, **kwargs): + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(data) + return read_xml(path, **kwargs) + + +def read_xml_iterparse_comp(comp_path, compression_only, **kwargs): + with get_handle(comp_path, "r", compression=compression_only) as handles: + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(handles.handle.read()) + return read_xml(path, **kwargs) + + +# FILE / URL + + +def test_parser_consistency_file(xml_books): + pytest.importorskip("lxml") + df_file_lxml = read_xml(xml_books, parser="lxml") + df_file_etree = read_xml(xml_books, parser="etree") + + df_iter_lxml = read_xml( + xml_books, + parser="lxml", + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + df_iter_etree = read_xml( + xml_books, + parser="etree", + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + + tm.assert_frame_equal(df_file_lxml, df_file_etree) + tm.assert_frame_equal(df_file_lxml, df_iter_lxml) + tm.assert_frame_equal(df_iter_lxml, df_iter_etree) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_parser_consistency_url(parser, httpserver): + httpserver.serve_content(content=xml_default_nmsp) + + df_xpath = read_xml(StringIO(xml_default_nmsp), parser=parser) + df_iter = read_xml( + BytesIO(xml_default_nmsp.encode()), + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + tm.assert_frame_equal(df_xpath, df_iter) + + +def test_file_like(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + df_file = read_xml(f, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_io(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + xml_obj = f.read() + + df_io = read_xml( + (BytesIO(xml_obj) if isinstance(xml_obj, bytes) else StringIO(xml_obj)), + parser=parser, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_io, df_expected) + + +def test_file_buffered_reader_string(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + xml_obj = f.read() + + if mode == "rb": + xml_obj = StringIO(xml_obj.decode()) + elif mode == "r": + xml_obj = StringIO(xml_obj) + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_buffered_reader_no_xml_declaration(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + next(f) + xml_obj = f.read() + + if mode == "rb": + xml_obj = StringIO(xml_obj.decode()) + elif mode == "r": + xml_obj = StringIO(xml_obj) + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_string_charset(parser): + txt = "<中文標籤>12" + + df_str = read_xml(StringIO(txt), parser=parser) + + df_expected = DataFrame({"c1": 1, "c2": 2}, index=[0]) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_charset(xml_doc_ch_utf, parser): + df_file = read_xml(xml_doc_ch_utf, parser=parser) + + df_expected = DataFrame( + { + "問": [ + "問 若箇是邪而言破邪 何者是正而道(Sorry, this is Big5 only)申正", + "問 既破有得申無得 亦應但破性執申假名以不", + "問 既破性申假 亦應但破有申無 若有無兩洗 亦應性假雙破耶", + ], + "答": [ + "".join( + [ + "答 邪既無量 正亦多途 大略為言不出二種 謂", + "有得與無得 有得是邪須破 無得是正須申\n\t\t故", + ] + ), + None, + "答 不例 有無皆是性 所以須雙破 既分性假異 故有破不破", + ], + "a": [ + None, + "答 性執是有得 假名是無得 今破有得申無得 即是破性執申假名也", + None, + ], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_handle_close(xml_books, parser): + with open(xml_books, "rb") as f: + read_xml(BytesIO(f.read()), parser=parser) + + assert not f.closed + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_lxml(val): + lxml_etree = pytest.importorskip("lxml.etree") + + msg = "|".join( + [ + "Document is empty", + # Seen on Mac with lxml 4.91 + r"None \(line 0\)", + ] + ) + with pytest.raises(lxml_etree.XMLSyntaxError, match=msg): + if isinstance(val, str): + read_xml(StringIO(val), parser="lxml") + else: + read_xml(BytesIO(val), parser="lxml") + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_etree(val): + with pytest.raises(ParseError, match="no element found"): + if isinstance(val, str): + read_xml(StringIO(val), parser="etree") + else: + read_xml(BytesIO(val), parser="etree") + + +def test_wrong_file_path(parser): + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + filename = os.path.join("data", "html", "books.xml") + + with pytest.raises( + FutureWarning, + match=msg, + ): + read_xml(filename, parser=parser) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url(httpserver, xml_file): + pytest.importorskip("lxml") + with open(xml_file, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]") + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_url, df_expected) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_wrong_url(parser, httpserver): + httpserver.serve_content("NOT FOUND", code=404) + with pytest.raises(HTTPError, match=("HTTP Error 404: NOT FOUND")): + read_xml(httpserver.url, xpath=".//book[count(*)=4]", parser=parser) + + +# CONTENT + + +def test_whitespace(parser): + xml = """ + + + + square + + 360 + + + + circle + + 360 + + + + triangle + + 180 + + """ + + df_xpath = read_xml(StringIO(xml), parser=parser, dtype="string") + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"row": ["sides", "shape", "degrees"]}, + dtype="string", + ) + + df_expected = DataFrame( + { + "sides": [" 4 ", " 0 ", " 3 "], + "shape": [ + "\n square\n ", + "\n circle\n ", + "\n triangle\n ", + ], + "degrees": ["\t360\t", "\t360\t", "\t180\t"], + }, + dtype="string", + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +# XPATH + + +def test_empty_xpath_lxml(xml_books): + pytest.importorskip("lxml") + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(xml_books, xpath=".//python", parser="lxml") + + +def test_bad_xpath_etree(xml_books): + with pytest.raises( + SyntaxError, match=("You have used an incorrect or unsupported XPath") + ): + read_xml(xml_books, xpath=".//[book]", parser="etree") + + +def test_bad_xpath_lxml(xml_books): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises(lxml_etree.XPathEvalError, match=("Invalid expression")): + read_xml(xml_books, xpath=".//[book]", parser="lxml") + + +# NAMESPACE + + +def test_default_namespace(parser): + df_nmsp = read_xml( + StringIO(xml_default_nmsp), + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser=parser, + ) + + df_iter = read_xml_iterparse( + xml_default_nmsp, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_prefix_namespace(parser): + df_nmsp = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_prefix_nmsp, parser=parser, iterparse={"row": ["shape", "degrees", "sides"]} + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_consistency_default_namespace(): + pytest.importorskip("lxml") + df_lxml = read_xml( + StringIO(xml_default_nmsp), + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + StringIO(xml_default_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +def test_consistency_prefix_namespace(): + pytest.importorskip("lxml") + df_lxml = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": 
"http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +# PREFIX + + +def test_missing_prefix_with_default_namespace(xml_books, parser): + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(xml_books, xpath=".//Placemark", parser=parser) + + +def test_missing_prefix_definition_etree(kml_cta_rail_lines): + with pytest.raises(SyntaxError, match=("you used an undeclared namespace prefix")): + read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="etree") + + +def test_missing_prefix_definition_lxml(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises(lxml_etree.XPathEvalError, match=("Undefined namespace prefix")): + read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml") + + +@pytest.mark.parametrize("key", ["", None]) +def test_none_namespace_prefix(key): + pytest.importorskip("lxml") + with pytest.raises( + TypeError, match=("empty namespace prefix is not supported in XPath") + ): + read_xml( + StringIO(xml_default_nmsp), + xpath=".//kml:Placemark", + namespaces={key: "http://www.opengis.net/kml/2.2"}, + parser="lxml", + ) + + +# ELEMS AND ATTRS + + +def test_file_elems_and_attrs(xml_books, parser): + df_file = read_xml(xml_books, parser=parser) + df_iter = read_xml( + xml_books, + parser=parser, + iterparse={"book": ["category", "title", "author", "year", "price"]}, + ) + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_file_only_attrs(xml_books, parser): + df_file = read_xml(xml_books, attrs_only=True, parser=parser) + df_iter = read_xml(xml_books, parser=parser, iterparse={"book": ["category"]}) + df_expected = DataFrame({"category": ["cooking", "children", "web"]}) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_file_only_elems(xml_books, parser): + df_file = read_xml(xml_books, elems_only=True, parser=parser) + df_iter = read_xml( + xml_books, + parser=parser, + iterparse={"book": ["title", "author", "year", "price"]}, + ) + df_expected = DataFrame( + { + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_elem_and_attrs_only(kml_cta_rail_lines, parser): + with pytest.raises( + ValueError, + match=("Either element or attributes can be parsed not both"), + ): + read_xml(kml_cta_rail_lines, elems_only=True, attrs_only=True, parser=parser) + + +def test_empty_attrs_only(parser): + xml = """ + + + square + 360 + + + circle + 360 + + + triangle + 180 + + """ + + with pytest.raises( + ValueError, + match=("xpath does not return any nodes or attributes"), + ): + read_xml(StringIO(xml), xpath="./row", attrs_only=True, parser=parser) + + +def test_empty_elems_only(parser): + xml = """ + + + + + """ + + with pytest.raises( + ValueError, + match=("xpath does not return any nodes or attributes"), + ): + read_xml(StringIO(xml), xpath="./row", elems_only=True, parser=parser) + + +def test_attribute_centric_xml(): + pytest.importorskip("lxml") + xml = """\ + + + + + + + + + + + + + + + + + +""" + + df_lxml = read_xml(StringIO(xml), xpath=".//station") + df_etree = read_xml(StringIO(xml), xpath=".//station", parser="etree") + + df_iter_lx = read_xml_iterparse(xml, iterparse={"station": ["Name", "coords"]}) + df_iter_et = read_xml_iterparse( + xml, parser="etree", iterparse={"station": ["Name", "coords"]} + ) + + tm.assert_frame_equal(df_lxml, df_etree) + tm.assert_frame_equal(df_iter_lx, df_iter_et) + + +# NAMES + + +def test_names_option_output(xml_books, parser): + df_file = read_xml( + xml_books, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser + ) + df_iter = read_xml( + xml_books, + parser=parser, + names=["Col1", "Col2", "Col3", "Col4", "Col5"], + iterparse={"book": ["category", "title", "author", "year", "price"]}, + ) + + df_expected = DataFrame( + { + "Col1": ["cooking", "children", "web"], + "Col2": ["Everyday Italian", "Harry Potter", "Learning XML"], + "Col3": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "Col4": [2005, 2005, 2003], + "Col5": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_names(parser): + xml = """\ + + + circle + curved + + + sphere + curved + +""" + df_xpath = read_xml( + StringIO(xml), + xpath=".//shape", + parser=parser, + names=["type_dim", "shape", "type_edge"], + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["type", "name", "type"]}, + names=["type_dim", "shape", "type_edge"], + ) + + df_expected = DataFrame( + { + "type_dim": ["2D", "3D"], + "shape": ["circle", "sphere"], + "type_edge": ["curved", "curved"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_values_new_names(parser): + xml = """\ + + + rectangle + rectangle + + + square + rectangle + + + ellipse + ellipse + + + circle + ellipse + +""" + df_xpath = read_xml( + StringIO(xml), xpath=".//shape", parser=parser, names=["name", "group"] + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["name", "family"]}, + names=["name", "group"], + ) + + df_expected = DataFrame( + { + "name": ["rectangle", "square", "ellipse", "circle"], + "group": ["rectangle", "rectangle", "ellipse", "ellipse"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_elements(parser): + xml = """\ + + + circle + ellipse + 360 + 0 + + + triangle + polygon + 180 + 3 + + + square + polygon + 360 + 4 + +""" + df_xpath = read_xml( + StringIO(xml), + xpath=".//shape", + parser=parser, + names=["name", "family", "degrees", "sides"], + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["value", "value", "value", "value"]}, + names=["name", "family", "degrees", "sides"], + ) + + df_expected = DataFrame( + { + "name": ["circle", "triangle", "square"], + "family": ["ellipse", "polygon", "polygon"], + "degrees": [360, 180, 360], + "sides": [0, 3, 4], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_names_option_wrong_length(xml_books, parser): + with pytest.raises(ValueError, match=("names does not match length")): + read_xml(xml_books, names=["Col1", "Col2", "Col3"], parser=parser) + + +def test_names_option_wrong_type(xml_books, parser): + with pytest.raises(TypeError, match=("is not a valid type for names")): + read_xml(xml_books, names="Col1, Col2, Col3", parser=parser) + + +# ENCODING + + +def test_wrong_encoding(xml_baby_names, parser): + with pytest.raises(UnicodeDecodeError, match=("'utf-8' codec can't decode")): + read_xml(xml_baby_names, parser=parser) + + +def test_utf16_encoding(xml_baby_names, parser): + with pytest.raises( + UnicodeError, + match=( + "UTF-16 stream does not start with BOM|" + "'utf-16-le' codec can't decode byte" + ), + ): + read_xml(xml_baby_names, encoding="UTF-16", parser=parser) + + +def test_unknown_encoding(xml_baby_names, parser): + with pytest.raises(LookupError, match=("unknown encoding: UFT-8")): + read_xml(xml_baby_names, encoding="UFT-8", parser=parser) + + +def test_ascii_encoding(xml_baby_names, parser): + with pytest.raises(UnicodeDecodeError, match=("'ascii' codec can't decode byte")): + read_xml(xml_baby_names, encoding="ascii", parser=parser) + + +def test_parser_consistency_with_encoding(xml_baby_names): + pytest.importorskip("lxml") + df_xpath_lxml = read_xml(xml_baby_names, 
parser="lxml", encoding="ISO-8859-1") + df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1") + + df_iter_lxml = read_xml( + xml_baby_names, + parser="lxml", + encoding="ISO-8859-1", + iterparse={"row": ["rank", "malename", "femalename"]}, + ) + df_iter_etree = read_xml( + xml_baby_names, + parser="etree", + encoding="ISO-8859-1", + iterparse={"row": ["rank", "malename", "femalename"]}, + ) + + tm.assert_frame_equal(df_xpath_lxml, df_xpath_etree) + tm.assert_frame_equal(df_xpath_etree, df_iter_etree) + tm.assert_frame_equal(df_iter_lxml, df_iter_etree) + + +def test_wrong_encoding_for_lxml(): + pytest.importorskip("lxml") + # GH#45133 + data = """ + + c + + +""" + with pytest.raises(TypeError, match="encoding None"): + read_xml(StringIO(data), parser="lxml", encoding=None) + + +def test_none_encoding_etree(): + # GH#45133 + data = """ + + c + + +""" + result = read_xml(StringIO(data), parser="etree", encoding=None) + expected = DataFrame({"a": ["c"]}) + tm.assert_frame_equal(result, expected) + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(xml_books): + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + read_xml(xml_books) + + +def test_wrong_parser(xml_books): + with pytest.raises( + ValueError, match=("Values for parser can only be lxml or etree.") + ): + read_xml(xml_books, parser="bs4") + + +# STYLESHEET + + +def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc): + pytest.importorskip("lxml") + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_flatten_doc, + ) + + df_iter = read_xml( + kml_cta_rail_lines, + iterparse={ + "Placemark": [ + "id", + "name", + "styleUrl", + "extrude", + "altitudeMode", + "coordinates", + ] + }, + ) + + tm.assert_frame_equal(df_kml, df_style) + tm.assert_frame_equal(df_kml, df_iter) + + +def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode): + pytest.importorskip("lxml") + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=f, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode): + pytest.importorskip("lxml") + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + xsl_obj = f.read() + + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_style_charset(): + pytest.importorskip("lxml") + xml = "<中文標籤>12" + + xsl = """\ + + + + + + + + + + + + <根> + + + + +""" + + 
df_orig = read_xml(StringIO(xml)) + df_style = read_xml(StringIO(xml), stylesheet=xsl) + + tm.assert_frame_equal(df_orig, df_style) + + +def test_not_stylesheet(kml_cta_rail_lines, xml_books): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises( + lxml_etree.XSLTParseError, match=("document is not a stylesheet") + ): + read_xml(kml_cta_rail_lines, stylesheet=xml_books) + + +def test_incorrect_xsl_syntax(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + +""" + + with pytest.raises( + lxml_etree.XMLSyntaxError, match=("Extra content at the end of the document") + ): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_incorrect_xsl_eval(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTParseError, match=("failed to compile")): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_incorrect_xsl_apply(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTApplyError, match=("Cannot resolve URI")): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path): + xml_etree = pytest.importorskip("lxml.etree") + + xsl = xml_data_path / "flatten.xsl" + + with pytest.raises( + xml_etree.XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + read_xml(kml_cta_rail_lines, stylesheet=xsl_obj) + + assert not f.closed + + +def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc): + pytest.importorskip("lxml") + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + read_xml(kml_cta_rail_lines, parser="etree", stylesheet=xsl_flatten_doc) + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_stylesheet(val): + pytest.importorskip("lxml") + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + kml = os.path.join("data", "xml", "cta_rail_lines.kml") + + with pytest.raises(FutureWarning, match=msg): + read_xml(kml, stylesheet=val) + + +# ITERPARSE +def test_file_like_iterparse(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "r" and parser == "lxml": + with pytest.raises( + TypeError, match=("reading file objects must return bytes objects") + ): + read_xml( + f, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + return None + else: + df_filelike = read_xml( + f, + parser=parser, + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_filelike, df_expected) + + +def test_file_io_iterparse(xml_books, parser, mode): + funcIO = StringIO if mode == "r" else BytesIO + with open( + xml_books, + mode, + encoding="utf-8" if mode == "r" else None, + ) as f: + with funcIO(f.read()) as b: + if mode == "r" and parser == "lxml": + with pytest.raises( + TypeError, match=("reading file objects must return bytes objects") + ): + read_xml( + b, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + return None + else: + df_fileio = read_xml( + b, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_fileio, df_expected) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url_path_error(parser, httpserver, xml_file): + with open(xml_file, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + with pytest.raises( + ParserError, match=("iterparse is designed for large XML files") + ): + read_xml( + httpserver.url, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + +def test_compression_error(parser, compression_only): + with tm.ensure_clean(filename="geom_xml.zip") as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with pytest.raises( + ParserError, match=("iterparse is designed for large XML files") + ): + read_xml( + path, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + compression=compression_only, + ) + + +def test_wrong_dict_type(xml_books, parser): + with pytest.raises(TypeError, match="list is not a valid type for iterparse"): + read_xml( + xml_books, + parser=parser, + iterparse=["category", "title", "year", "author", "price"], + ) + + +def test_wrong_dict_value(xml_books, parser): + with pytest.raises( + TypeError, match=" is not a valid type for value in iterparse" + ): + read_xml(xml_books, parser=parser, iterparse={"book": "category"}) + + +def test_bad_xml(parser): + bad_xml = """\ + + + square + 00360 + 4.0 + 2020-01-01 + + + circle + 00360 + + 2021-01-01 + + + triangle + 00180 + 3.0 + 2022-01-01 + +""" + with tm.ensure_clean(filename="bad.xml") as path: + with open(path, "w", encoding="utf-8") as f: + f.write(bad_xml) + + with pytest.raises( + SyntaxError, + match=( + "Extra content at the end of the document|" + "junk after document element" + ), + ): + read_xml( + path, + parser=parser, + parse_dates=["date"], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + +def test_comment(parser): + xml = """\ + + + + + circle + 2D + + + sphere + 3D + + + + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtd(parser): + xml = """\ + + + + +]> + + + circle + 2D + + + sphere + 3D + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = 
read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_processing_instruction(parser): + xml = """\ + + + + + +, , ?> + + + circle + 2D + + + sphere + 3D + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_no_result(xml_books, parser): + with pytest.raises( + ParserError, match="No result from selected items in iterparse." + ): + read_xml( + xml_books, + parser=parser, + iterparse={"node": ["attr1", "elem1", "elem2", "elem3"]}, + ) + + +def test_empty_data(xml_books, parser): + with pytest.raises(EmptyDataError, match="No columns to parse from file"): + read_xml( + xml_books, + parser=parser, + iterparse={"book": ["attr1", "elem1", "elem2", "elem3"]}, + ) + + +def test_online_stylesheet(): + pytest.importorskip("lxml") + xml = """\ + + + + Empire Burlesque + Bob Dylan + USA + Columbia + 10.90 + 1985 + + + Hide your heart + Bonnie Tyler + UK + CBS Records + 9.90 + 1988 + + + Greatest Hits + Dolly Parton + USA + RCA + 9.90 + 1982 + + + Still got the blues + Gary Moore + UK + Virgin records + 10.20 + 1990 + + + Eros + Eros Ramazzotti + EU + BMG + 9.90 + 1997 + + + One night only + Bee Gees + UK + Polydor + 10.90 + 1998 + + + Sylvias Mother + Dr.Hook + UK + CBS + 8.10 + 1973 + + + Maggie May + Rod Stewart + UK + Pickwick + 8.50 + 1990 + + + Romanza + Andrea Bocelli + EU + Polydor + 10.80 + 1996 + + + When a man loves a woman + Percy Sledge + USA + Atlantic + 8.70 + 1987 + + + Black angel + Savage Rose + EU + Mega + 10.90 + 1995 + + + 1999 Grammy Nominees + Many + USA + Grammy + 10.20 + 1999 + + + For the good times + Kenny Rogers + UK + Mucik Master + 8.70 + 1995 + + + Big Willie style + Will Smith + USA + Columbia + 9.90 + 1997 + + + Tupelo Honey + Van Morrison + UK + Polydor + 8.20 + 1971 + + + Soulsville + Jorn Hoel + Norway + WEA + 7.90 + 1996 + + + The very best of + Cat Stevens + UK + Island + 8.90 + 1990 + + + Stop + Sam Brown + UK + A and M + 8.90 + 1988 + + + Bridge of Spies + T`Pau + UK + Siren + 7.90 + 1987 + + + Private Dancer + Tina Turner + UK + Capitol + 8.90 + 1983 + + + Midt om natten + Kim Larsen + EU + Medley + 7.80 + 1983 + + + Pavarotti Gala Concert + Luciano Pavarotti + UK + DECCA + 9.90 + 1991 + + + The dock of the bay + Otis Redding + USA + Stax Records + 7.90 + 1968 + + + Picture book + Simply Red + EU + Elektra + 7.20 + 1985 + + + Red + The Communards + UK + London + 7.80 + 1987 + + + Unchain my heart + Joe Cocker + USA + EMI + 8.20 + 1987 + + +""" + xsl = """\ + + + + + +

+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+<xsl:template match="/">
+<html>
+<body>
+  <h2>My CD Collection</h2>
+  <table>
+    <tr>
+      <th>Title</th>
+      <th>Artist</th>
+    </tr>
+    <xsl:for-each select="catalog/cd">
+    <tr>
+      <td><xsl:value-of select="title"/></td>
+      <td><xsl:value-of select="artist"/></td>
+    </tr>
+    </xsl:for-each>
+  </table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
+""" + + df_xsl = read_xml( + StringIO(xml), + xpath=".//tr[td and position() <= 6]", + names=["title", "artist"], + stylesheet=xsl, + ) + + df_expected = DataFrame( + { + "title": { + 0: "Empire Burlesque", + 1: "Hide your heart", + 2: "Greatest Hits", + 3: "Still got the blues", + 4: "Eros", + }, + "artist": { + 0: "Bob Dylan", + 1: "Bonnie Tyler", + 2: "Dolly Parton", + 3: "Gary Moore", + 4: "Eros Ramazzotti", + }, + } + ) + + tm.assert_frame_equal(df_expected, df_xsl) + + +# COMPRESSION + + +def test_compression_read(parser, compression_only): + with tm.ensure_clean() as comp_path: + geom_df.to_xml( + comp_path, index=False, parser=parser, compression=compression_only + ) + + df_xpath = read_xml(comp_path, parser=parser, compression=compression_only) + + df_iter = read_xml_iterparse_comp( + comp_path, + compression_only, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + compression=compression_only, + ) + + tm.assert_frame_equal(df_xpath, geom_df) + tm.assert_frame_equal(df_iter, geom_df) + + +def test_wrong_compression(parser, compression, compression_only): + actual_compression = compression + attempted_compression = compression_only + + if actual_compression == attempted_compression: + pytest.skip(f"{actual_compression} == {attempted_compression}") + + errors = { + "bz2": (OSError, "Invalid data stream"), + "gzip": (OSError, "Not a gzipped file"), + "zip": (BadZipFile, "File is not a zip file"), + "tar": (ReadError, "file could not be opened successfully"), + } + zstd = import_optional_dependency("zstandard", errors="ignore") + if zstd is not None: + errors["zstd"] = (zstd.ZstdError, "Unknown frame descriptor") + lzma = import_optional_dependency("lzma", errors="ignore") + if lzma is not None: + errors["xz"] = (LZMAError, "Input format not supported by decoder") + error_cls, error_str = errors[attempted_compression] + + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=actual_compression) + + with pytest.raises(error_cls, match=error_str): + read_xml(path, parser=parser, compression=attempted_compression) + + +def test_unsuported_compression(parser): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + read_xml(path, parser=parser, compression="7z") + + +# STORAGE OPTIONS + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_s3_parser_consistency(s3_public_bucket_with_data, s3so): + pytest.importorskip("s3fs") + pytest.importorskip("lxml") + s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml" + + df_lxml = read_xml(s3, parser="lxml", storage_options=s3so) + + df_etree = read_xml(s3, parser="etree", storage_options=s3so) + + tm.assert_frame_equal(df_lxml, df_etree) + + +def test_read_xml_nullable_dtypes( + parser, string_storage, dtype_backend, using_infer_string +): + # GH#50500 + data = """ + + + x + 1 + 4.0 + x + 2 + 4.0 + + True + False + + + y + 2 + 5.0 + + + + + False + + +""" + + if using_infer_string: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"])) + string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None])) + + elif string_storage == "python": + string_array = StringArray(np.array(["x", "y"], dtype=np.object_)) + string_array_na = StringArray(np.array(["x", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["x", "y"])) + string_array_na = 
ArrowExtensionArray(pa.array(["x", None])) + + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["x", "y"])) + string_array_na = ArrowStringArray(pa.array(["x", None])) + + with pd.option_context("mode.string_storage", string_storage): + result = read_xml(StringIO(data), parser=parser, dtype_backend=dtype_backend) + + expected = DataFrame( + { + "a": string_array, + "b": Series([1, 2], dtype="Int64"), + "c": Series([4.0, 5.0], dtype="Float64"), + "d": string_array_na, + "e": Series([2, NA], dtype="Int64"), + "f": Series([4.0, NA], dtype="Float64"), + "g": Series([NA, NA], dtype="Int64"), + "h": Series([True, False], dtype="boolean"), + "i": Series([False, NA], dtype="boolean"), + } + ) + + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + expected["g"] = ArrowExtensionArray(pa.array([None, None])) + + tm.assert_frame_equal(result, expected) + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_xml("test", dtype_backend="numpy") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..a85576ff13f5c1011b41c0ba4735619c5f5fb742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py @@ -0,0 +1,485 @@ +from __future__ import annotations + +from io import StringIO + +import pytest + +from pandas.errors import ParserWarning +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + to_datetime, +) +import pandas._testing as tm + +from pandas.io.xml import read_xml + + +@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"]) +def parser(request): + return request.param + + +@pytest.fixture( + params=[None, {"book": ["category", "title", "author", "year", "price"]}] +) +def iterparse(request): + return request.param + + +def read_xml_iterparse(data, **kwargs): + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(data) + return read_xml(path, **kwargs) + + +xml_types = """\ + + + + square + 00360 + 4.0 + + + circle + 00360 + + + + triangle + 00180 + 3.0 + +""" + +xml_dates = """ + + + square + 00360 + 4.0 + 2020-01-01 + + + circle + 00360 + + 2021-01-01 + + + triangle + 00180 + 3.0 + 2022-01-01 + +""" + + +# DTYPE + + +def test_dtype_single_str(parser): + df_result = read_xml(StringIO(xml_types), dtype={"degrees": "str"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"degrees": "str"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtypes_all_str(parser): + df_result = read_xml(StringIO(xml_dates), dtype="string", parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + dtype="string", + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + 
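+    # With dtype="string", every column -- including the numeric "sides" and
+    # the ISO "date" strings -- is read back as the nullable StringDtype, and
+    # the empty <sides/> element becomes pd.NA rather than NaN.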
df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": ["4.0", None, "3.0"], + "date": ["2020-01-01", "2021-01-01", "2022-01-01"], + }, + dtype="string", + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtypes_with_names(parser): + df_result = read_xml( + StringIO(xml_dates), + names=["Col1", "Col2", "Col3", "Col4"], + dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64[ns]"}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + names=["Col1", "Col2", "Col3", "Col4"], + dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64[ns]"}, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "Col1": ["square", "circle", "triangle"], + "Col2": Series(["00360", "00360", "00180"]).astype("string"), + "Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"), + "Col4": DatetimeIndex( + ["2020-01-01", "2021-01-01", "2022-01-01"], dtype="M8[ns]" + ), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtype_nullable_int(parser): + df_result = read_xml(StringIO(xml_types), dtype={"sides": "Int64"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"sides": "Int64"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": Series([4.0, float("nan"), 3.0]).astype("Int64"), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtype_float(parser): + df_result = read_xml(StringIO(xml_types), dtype={"degrees": "float"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"degrees": "float"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": Series([360, 360, 180]).astype("float"), + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_wrong_dtype(xml_books, parser, iterparse): + with pytest.raises( + ValueError, match=('Unable to parse string "Everyday Italian" at position 0') + ): + read_xml( + xml_books, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse + ) + + +def test_both_dtype_converters(parser): + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + with tm.assert_produces_warning(ParserWarning, match="Both a converter and dtype"): + df_result = read_xml( + StringIO(xml_types), + dtype={"degrees": "str"}, + converters={"degrees": str}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_types, + dtype={"degrees": "str"}, + converters={"degrees": str}, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +# CONVERTERS + + +def test_converters_str(parser): + df_result = read_xml( + StringIO(xml_types), converters={"degrees": str}, parser=parser + ) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + converters={"degrees": str}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { 
+ "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_converters_date(parser): + convert_to_datetime = lambda x: to_datetime(x) + df_result = read_xml( + StringIO(xml_dates), converters={"date": convert_to_datetime}, parser=parser + ) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + converters={"date": convert_to_datetime}, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_wrong_converters_type(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("Type converters must be a dict or subclass")): + read_xml( + xml_books, converters={"year", str}, parser=parser, iterparse=iterparse + ) + + +def test_callable_func_converters(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("'float' object is not callable")): + read_xml( + xml_books, converters={"year": float()}, parser=parser, iterparse=iterparse + ) + + +def test_callable_str_converters(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("'str' object is not callable")): + read_xml( + xml_books, converters={"year": "float"}, parser=parser, iterparse=iterparse + ) + + +# PARSE DATES + + +def test_parse_dates_column_name(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=["date"], parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=["date"], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_column_index(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=[3], parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=[3], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_true(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=True, parser=parser) + + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=True, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": ["2020-01-01", "2021-01-01", "2022-01-01"], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_dictionary(parser): + xml = """ + + + square + 360 + 4.0 + 2020 + 12 + 31 + + + circle + 360 + + 2021 + 12 + 31 + + + triangle + 180 + 
    <sides>3.0</sides>
+    <year>2022</year>
+    <month>12</month>
+    <day>31</day>
+  </row>
+</data>"""
+
+    df_result = read_xml(
+        StringIO(xml), parse_dates={"date_end": ["year", "month", "day"]}, parser=parser
+    )
+    df_iter = read_xml_iterparse(
+        xml,
+        parser=parser,
+        parse_dates={"date_end": ["year", "month", "day"]},
+        iterparse={"row": ["shape", "degrees", "sides", "year", "month", "day"]},
+    )
+
+    df_expected = DataFrame(
+        {
+            "date_end": to_datetime(["2020-12-31", "2021-12-31", "2022-12-31"]),
+            "shape": ["square", "circle", "triangle"],
+            "degrees": [360, 360, 180],
+            "sides": [4.0, float("nan"), 3.0],
+        }
+    )
+
+    tm.assert_frame_equal(df_result, df_expected)
+    tm.assert_frame_equal(df_iter, df_expected)
+
+
+def test_day_first_parse_dates(parser):
+    xml = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <shape>square</shape>
+    <degrees>00360</degrees>
+    <sides>4.0</sides>
+    <date>31/12/2020</date>
+  </row>
+  <row>
+    <shape>circle</shape>
+    <degrees>00360</degrees>
+    <sides/>
+    <date>31/12/2021</date>
+  </row>
+  <row>
+    <shape>triangle</shape>
+    <degrees>00180</degrees>
+    <sides>3.0</sides>
+    <date>31/12/2022</date>
+  </row>
+</data>"""
+
+    df_expected = DataFrame(
+        {
+            "shape": ["square", "circle", "triangle"],
+            "degrees": [360, 360, 180],
+            "sides": [4.0, float("nan"), 3.0],
+            "date": to_datetime(["2020-12-31", "2021-12-31", "2022-12-31"]),
+        }
+    )
+
+    with tm.assert_produces_warning(
+        UserWarning, match="Parsing dates in %d/%m/%Y format"
+    ):
+        df_result = read_xml(StringIO(xml), parse_dates=["date"], parser=parser)
+        df_iter = read_xml_iterparse(
+            xml,
+            parse_dates=["date"],
+            parser=parser,
+            iterparse={"row": ["shape", "degrees", "sides", "date"]},
+        )
+
+        tm.assert_frame_equal(df_result, df_expected)
+        tm.assert_frame_equal(df_iter, df_expected)
+
+
+def test_wrong_parse_dates_type(xml_books, parser, iterparse):
+    with pytest.raises(
+        TypeError, match=("Only booleans, lists, and dictionaries are accepted")
+    ):
+        read_xml(xml_books, parse_dates={"date"}, parser=parser, iterparse=iterparse)
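+
+
+# A minimal standalone sketch (not part of the suite) of the dictionary form
+# of parse_dates tested above, which assembles one datetime column from
+# several component columns; the combined column name "when" is arbitrary:
+#
+#   from io import StringIO
+#   from pandas.io.xml import read_xml
+#
+#   doc = (
+#       "<data><row>"
+#       "<year>2020</year><month>12</month><day>31</day>"
+#       "</row></data>"
+#   )
+#   df = read_xml(StringIO(doc), parse_dates={"when": ["year", "month", "day"]})
+#   # df["when"][0] -> Timestamp("2020-12-31")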