Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_177_mp_rank_00_optim_states.pt +3 -0
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_20_mp_rank_03_optim_states.pt +3 -0
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_214_mp_rank_01_optim_states.pt +3 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_indexing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py +68 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py +385 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py +39 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py +69 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py +245 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py +294 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py +203 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py +125 -0
- venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py +67 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/conftest.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/generate_legacy_storage_files.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_clipboard.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_feather.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gbq.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gcs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_html.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_http_headers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_orc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_parquet.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_pickle.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_s3.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_sql.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_stata.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py +130 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py +21 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py +873 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py +543 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py +1087 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_177_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:163e02a237da82f144fc1024f84d195c18ccdc31ff7c654a8a82571cec6f7c7d
+size 41830148
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_20_mp_rank_03_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ab0fc2e5a020ab5fffe26c86381c4dfe24605997577c2a0ddd45679a9821eb0
+size 41830330
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_214_mp_rank_01_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8d3ba90236e9d4be14419a32fa3f527d664fe39c31255ad308838e8f878aba5
+size 41830148
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_indexing.cpython-310.pyc
ADDED
Binary file (804 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py
ADDED
@@ -0,0 +1,68 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    Int16Dtype,
+    Int32Dtype,
+    Int64Dtype,
+    UInt8Dtype,
+    UInt16Dtype,
+    UInt32Dtype,
+    UInt64Dtype,
+)
+
+
+@pytest.fixture(
+    params=[
+        Int8Dtype,
+        Int16Dtype,
+        Int32Dtype,
+        Int64Dtype,
+        UInt8Dtype,
+        UInt16Dtype,
+        UInt32Dtype,
+        UInt64Dtype,
+    ]
+)
+def dtype(request):
+    """Parametrized fixture returning integer 'dtype'"""
+    return request.param()
+
+
+@pytest.fixture
+def data(dtype):
+    """
+    Fixture returning 'data' array with valid and missing values according to
+    parametrized integer 'dtype'.
+
+    Used to test dtype conversion with and without missing values.
+    """
+    return pd.array(
+        list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100],
+        dtype=dtype,
+    )
+
+
+@pytest.fixture
+def data_missing(dtype):
+    """
+    Fixture returning array with exactly one NaN and one valid integer,
+    according to parametrized integer 'dtype'.
+
+    Used to test dtype conversion with and without missing values.
+    """
+    return pd.array([np.nan, 1], dtype=dtype)
+
+
+@pytest.fixture(params=["data", "data_missing"])
+def all_data(request, data, data_missing):
+    """Parametrized fixture returning 'data' or 'data_missing' integer arrays.
+
+    Used to test dtype conversion with and without missing values.
+    """
+    if request.param == "data":
+        return data
+    elif request.param == "data_missing":
+        return data_missing
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py
ADDED
@@ -0,0 +1,385 @@
+import operator
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core import ops
+from pandas.core.arrays import FloatingArray
+
+# Basic test for the arithmetic array ops
+# -----------------------------------------------------------------------------
+
+
+@pytest.mark.parametrize(
+    "opname, exp",
+    [("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
+    ids=["add", "mul"],
+)
+def test_add_mul(dtype, opname, exp):
+    a = pd.array([0, 1, None, 3, 4], dtype=dtype)
+    b = pd.array([1, 2, 3, None, 5], dtype=dtype)
+
+    # array / array
+    expected = pd.array(exp, dtype=dtype)
+
+    op = getattr(operator, opname)
+    result = op(a, b)
+    tm.assert_extension_array_equal(result, expected)
+
+    op = getattr(ops, "r" + opname)
+    result = op(a, b)
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_sub(dtype):
+    a = pd.array([1, 2, 3, None, 5], dtype=dtype)
+    b = pd.array([0, 1, None, 3, 4], dtype=dtype)
+
+    result = a - b
+    expected = pd.array([1, 1, None, None, 1], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_div(dtype):
+    a = pd.array([1, 2, 3, None, 5], dtype=dtype)
+    b = pd.array([0, 1, None, 3, 4], dtype=dtype)
+
+    result = a / b
+    expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64")
+    tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
+def test_divide_by_zero(zero, negative):
+    # https://github.com/pandas-dev/pandas/issues/27398, GH#22793
+    a = pd.array([0, 1, -1, None], dtype="Int64")
+    result = a / zero
+    expected = FloatingArray(
+        np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"),
+        np.array([False, False, False, True]),
+    )
+    if negative:
+        expected *= -1
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_floordiv(dtype):
+    a = pd.array([1, 2, 3, None, 5], dtype=dtype)
+    b = pd.array([0, 1, None, 3, 4], dtype=dtype)
+
+    result = a // b
+    # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
+    expected = pd.array([0, 2, None, None, 1], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_floordiv_by_int_zero_no_mask(any_int_ea_dtype):
+    # GH 48223: Aligns with non-masked floordiv
+    # but differs from numpy
+    # https://github.com/pandas-dev/pandas/issues/30188#issuecomment-564452740
+    ser = pd.Series([0, 1], dtype=any_int_ea_dtype)
+    result = 1 // ser
+    expected = pd.Series([np.inf, 1.0], dtype="Float64")
+    tm.assert_series_equal(result, expected)
+
+    ser_non_nullable = ser.astype(ser.dtype.numpy_dtype)
+    result = 1 // ser_non_nullable
+    expected = expected.astype(np.float64)
+    tm.assert_series_equal(result, expected)
+
+
+def test_mod(dtype):
+    a = pd.array([1, 2, 3, None, 5], dtype=dtype)
+    b = pd.array([0, 1, None, 3, 4], dtype=dtype)
+
+    result = a % b
+    expected = pd.array([0, 0, None, None, 1], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_pow_scalar():
+    a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
+    result = a**0
+    expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = a**1
+    expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = a**pd.NA
+    expected = pd.array([None, None, 1, None, None], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = a**np.nan
+    expected = FloatingArray(
+        np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
+        np.array([False, False, False, True, False]),
+    )
+    tm.assert_extension_array_equal(result, expected)
+
+    # reversed
+    a = a[1:]  # Can't raise integers to negative powers.
+
+    result = 0**a
+    expected = pd.array([1, 0, None, 0], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = 1**a
+    expected = pd.array([1, 1, 1, 1], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = pd.NA**a
+    expected = pd.array([1, None, None, None], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = np.nan**a
+    expected = FloatingArray(
+        np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
+        np.array([False, False, True, False]),
+    )
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_pow_array():
+    a = pd.array([0, 0, 0, 1, 1, 1, None, None, None])
+    b = pd.array([0, 1, None, 0, 1, None, 0, 1, None])
+    result = a**b
+    expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None])
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_rpow_one_to_na():
+    # https://github.com/pandas-dev/pandas/issues/22022
+    # https://github.com/pandas-dev/pandas/issues/29997
+    arr = pd.array([np.nan, np.nan], dtype="Int64")
+    result = np.array([1.0, 2.0]) ** arr
+    expected = pd.array([1.0, np.nan], dtype="Float64")
+    tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("other", [0, 0.5])
+def test_numpy_zero_dim_ndarray(other):
+    arr = pd.array([1, None, 2])
+    result = arr + np.array(other)
+    expected = arr + other
+    tm.assert_equal(result, expected)
+
+
+# Test generic characteristics / errors
+# -----------------------------------------------------------------------------
+
+
+def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string):
+    op = all_arithmetic_operators
+    s = pd.Series(data)
+    ops = getattr(s, op)
+
+    if using_infer_string:
+        import pyarrow as pa
+
+        errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError)
+    else:
+        errs = TypeError
+
+    # invalid scalars
+    msg = "|".join(
+        [
+            r"can only perform ops with numeric values",
+            r"IntegerArray cannot perform the operation mod",
+            r"unsupported operand type",
+            r"can only concatenate str \(not \"int\"\) to str",
+            "not all arguments converted during string",
+            "ufunc '.*' not supported for the input types, and the inputs could not",
+            "ufunc '.*' did not contain a loop with signature matching types",
+            "Addition/subtraction of integers and integer-arrays with Timestamp",
+            "has no kernel",
+            "not implemented",
+            "The 'out' kwarg is necessary. Use numpy.strings.multiply without it.",
+        ]
+    )
+    with pytest.raises(errs, match=msg):
+        ops("foo")
+    with pytest.raises(errs, match=msg):
+        ops(pd.Timestamp("20180101"))
+
+    # invalid array-likes
+    str_ser = pd.Series("foo", index=s.index)
+    # with pytest.raises(TypeError, match=msg):
+    if (
+        all_arithmetic_operators
+        in [
+            "__mul__",
+            "__rmul__",
+        ]
+        and not using_infer_string
+    ):  # (data[~data.isna()] >= 0).all():
+        res = ops(str_ser)
+        expected = pd.Series(["foo" * x for x in data], index=s.index)
+        expected = expected.fillna(np.nan)
+        # TODO: doing this fillna to keep tests passing as we make
+        # assert_almost_equal stricter, but the expected with pd.NA seems
+        # more-correct than np.nan here.
+        tm.assert_series_equal(res, expected)
+    else:
+        with pytest.raises(errs, match=msg):
+            ops(str_ser)
+
+    msg = "|".join(
+        [
+            "can only perform ops with numeric values",
+            "cannot perform .* with this index type: DatetimeArray",
+            "Addition/subtraction of integers and integer-arrays "
+            "with DatetimeArray is no longer supported. *",
+            "unsupported operand type",
+            r"can only concatenate str \(not \"int\"\) to str",
+            "not all arguments converted during string",
+            "cannot subtract DatetimeArray from ndarray",
+            "has no kernel",
+            "not implemented",
+        ]
+    )
+    with pytest.raises(errs, match=msg):
+        ops(pd.Series(pd.date_range("20180101", periods=len(s))))
+
+
+# Various
+# -----------------------------------------------------------------------------
+
+
+# TODO test unsigned overflow
+
+
+def test_arith_coerce_scalar(data, all_arithmetic_operators):
+    op = tm.get_op_from_name(all_arithmetic_operators)
+    s = pd.Series(data)
+    other = 0.01
+
+    result = op(s, other)
+    expected = op(s.astype(float), other)
+    expected = expected.astype("Float64")
+
+    # rmod results in NaN that wasn't NA in original nullable Series -> unmask it
+    if all_arithmetic_operators == "__rmod__":
+        mask = (s == 0).fillna(False).to_numpy(bool)
+        expected.array._mask[mask] = False
+
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
+def test_arithmetic_conversion(all_arithmetic_operators, other):
+    # if we have a float operand we should have a float result
+    # if that is equal to an integer
+    op = tm.get_op_from_name(all_arithmetic_operators)
+
+    s = pd.Series([1, 2, 3], dtype="Int64")
+    result = op(s, other)
+    assert result.dtype == "Float64"
+
+
+def test_cross_type_arithmetic():
+    df = pd.DataFrame(
+        {
+            "A": pd.Series([1, 2, np.nan], dtype="Int64"),
+            "B": pd.Series([1, np.nan, 3], dtype="UInt8"),
+            "C": [1, 2, 3],
+        }
+    )
+
+    result = df.A + df.C
+    expected = pd.Series([2, 4, np.nan], dtype="Int64")
+    tm.assert_series_equal(result, expected)
+
+    result = (df.A + df.C) * 3 == 12
+    expected = pd.Series([False, True, None], dtype="boolean")
+    tm.assert_series_equal(result, expected)
+
+    result = df.A + df.B
+    expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("op", ["mean"])
+def test_reduce_to_float(op):
+    # some reduce ops always return float, even if the result
+    # is a rounded number
+    df = pd.DataFrame(
+        {
+            "A": ["a", "b", "b"],
+            "B": [1, None, 3],
+            "C": pd.array([1, None, 3], dtype="Int64"),
+        }
+    )
+
+    # op
+    result = getattr(df.C, op)()
+    assert isinstance(result, float)
+
+    # groupby
+    result = getattr(df.groupby("A"), op)()
+
+    expected = pd.DataFrame(
+        {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Float64")},
+        index=pd.Index(["a", "b"], name="A"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "source, neg_target, abs_target",
+    [
+        ([1, 2, 3], [-1, -2, -3], [1, 2, 3]),
+        ([1, 2, None], [-1, -2, None], [1, 2, None]),
+        ([-1, 0, 1], [1, 0, -1], [1, 0, 1]),
+    ],
+)
+def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_target):
+    dtype = any_signed_int_ea_dtype
+    arr = pd.array(source, dtype=dtype)
+    neg_result, pos_result, abs_result = -arr, +arr, abs(arr)
+    neg_target = pd.array(neg_target, dtype=dtype)
+    abs_target = pd.array(abs_target, dtype=dtype)
+
+    tm.assert_extension_array_equal(neg_result, neg_target)
+    tm.assert_extension_array_equal(pos_result, arr)
+    assert not tm.shares_memory(pos_result, arr)
+    tm.assert_extension_array_equal(abs_result, abs_target)
+
+
+def test_values_multiplying_large_series_by_NA():
+    # GH#33701
+
+    result = pd.NA * pd.Series(np.zeros(10001))
+    expected = pd.Series([pd.NA] * 10001)
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_bitwise(dtype):
+    left = pd.array([1, None, 3, 4], dtype=dtype)
+    right = pd.array([None, 3, 5, 4], dtype=dtype)
+
+    result = left | right
+    expected = pd.array([None, None, 3 | 5, 4 | 4], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+    result = left & right
+    expected = pd.array([None, None, 3 & 5, 4 & 4], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+    result = left ^ right
+    expected = pd.array([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+    # TODO: desired behavior when operating with boolean? defer?
+
+    floats = right.astype("Float64")
+    with pytest.raises(TypeError, match="unsupported operand type"):
+        left | floats
+    with pytest.raises(TypeError, match="unsupported operand type"):
+        left & floats
+    with pytest.raises(TypeError, match="unsupported operand type"):
+        left ^ floats
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py
ADDED
@@ -0,0 +1,39 @@
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.tests.arrays.masked_shared import (
+    ComparisonOps,
+    NumericOps,
+)
+
+
+class TestComparisonOps(NumericOps, ComparisonOps):
+    @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
+    def test_scalar(self, other, comparison_op, dtype):
+        ComparisonOps.test_scalar(self, other, comparison_op, dtype)
+
+    def test_compare_to_int(self, dtype, comparison_op):
+        # GH 28930
+        op_name = f"__{comparison_op.__name__}__"
+        s1 = pd.Series([1, None, 3], dtype=dtype)
+        s2 = pd.Series([1, None, 3], dtype="float")
+
+        method = getattr(s1, op_name)
+        result = method(2)
+
+        method = getattr(s2, op_name)
+        expected = method(2).astype("boolean")
+        expected[s2.isna()] = pd.NA
+
+        tm.assert_series_equal(result, expected)
+
+
+def test_equals():
+    # GH-30652
+    # equals is generally tested in /tests/extension/base/methods, but this
+    # specifically tests that two arrays of the same class but different dtype
+    # do not evaluate equal
+    a1 = pd.array([1, 2, None], dtype="Int64")
+    a2 = pd.array([1, 2, None], dtype="Int32")
+    assert a1.equals(a2) is False
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py
ADDED
@@ -0,0 +1,69 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+    "to_concat_dtypes, result_dtype",
+    [
+        (["Int64", "Int64"], "Int64"),
+        (["UInt64", "UInt64"], "UInt64"),
+        (["Int8", "Int8"], "Int8"),
+        (["Int8", "Int16"], "Int16"),
+        (["UInt8", "Int8"], "Int16"),
+        (["Int32", "UInt32"], "Int64"),
+        (["Int64", "UInt64"], "Float64"),
+        (["Int64", "boolean"], "object"),
+        (["UInt8", "boolean"], "object"),
+    ],
+)
+def test_concat_series(to_concat_dtypes, result_dtype):
+    # we expect the same dtypes as we would get with non-masked inputs,
+    # just masked where available.
+
+    result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes])
+    expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype(
+        result_dtype
+    )
+    tm.assert_series_equal(result, expected)
+
+    # order doesn't matter for result
+    result = pd.concat(
+        [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]]
+    )
+    expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype(
+        result_dtype
+    )
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "to_concat_dtypes, result_dtype",
+    [
+        (["Int64", "int64"], "Int64"),
+        (["UInt64", "uint64"], "UInt64"),
+        (["Int8", "int8"], "Int8"),
+        (["Int8", "int16"], "Int16"),
+        (["UInt8", "int8"], "Int16"),
+        (["Int32", "uint32"], "Int64"),
+        (["Int64", "uint64"], "Float64"),
+        (["Int64", "bool"], "object"),
+        (["UInt8", "bool"], "object"),
+    ],
+)
+def test_concat_series_with_numpy(to_concat_dtypes, result_dtype):
+    # we expect the same dtypes as we would get with non-masked inputs,
+    # just masked where available.
+
+    s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0])
+    s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1]))
+    result = pd.concat([s1, s2], ignore_index=True)
+    expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype)
+    tm.assert_series_equal(result, expected)
+
+    # order doesn't matter for result
+    result = pd.concat([s2, s1], ignore_index=True)
+    expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype)
+    tm.assert_series_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py
ADDED
@@ -0,0 +1,245 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.api.types import is_integer
+from pandas.core.arrays import IntegerArray
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    Int32Dtype,
+    Int64Dtype,
+)
+
+
+@pytest.fixture(params=[pd.array, IntegerArray._from_sequence])
+def constructor(request):
+    """Fixture returning parametrized IntegerArray from given sequence.
+
+    Used to test dtype conversions.
+    """
+    return request.param
+
+
+def test_uses_pandas_na():
+    a = pd.array([1, None], dtype=Int64Dtype())
+    assert a[1] is pd.NA
+
+
+def test_from_dtype_from_float(data):
+    # construct from our dtype & string dtype
+    dtype = data.dtype
+
+    # from float
+    expected = pd.Series(data)
+    result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype))
+    tm.assert_series_equal(result, expected)
+
+    # from int / list
+    expected = pd.Series(data)
+    result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
+    tm.assert_series_equal(result, expected)
+
+    # from int / array
+    expected = pd.Series(data).dropna().reset_index(drop=True)
+    dropped = np.array(data.dropna()).astype(np.dtype(dtype.type))
+    result = pd.Series(dropped, dtype=str(dtype))
+    tm.assert_series_equal(result, expected)
+
+
+def test_conversions(data_missing):
+    # astype to object series
+    df = pd.DataFrame({"A": data_missing})
+    result = df["A"].astype("object")
+    expected = pd.Series(np.array([pd.NA, 1], dtype=object), name="A")
+    tm.assert_series_equal(result, expected)
+
+    # convert to object ndarray
+    # we assert that we are exactly equal
+    # including type conversions of scalars
+    result = df["A"].astype("object").values
+    expected = np.array([pd.NA, 1], dtype=object)
+    tm.assert_numpy_array_equal(result, expected)
+
+    for r, e in zip(result, expected):
+        if pd.isnull(r):
+            assert pd.isnull(e)
+        elif is_integer(r):
+            assert r == e
+            assert is_integer(e)
+        else:
+            assert r == e
+            assert type(r) == type(e)
+
+
+def test_integer_array_constructor():
+    values = np.array([1, 2, 3, 4], dtype="int64")
+    mask = np.array([False, False, False, True], dtype="bool")
+
+    result = IntegerArray(values, mask)
+    expected = pd.array([1, 2, 3, np.nan], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    msg = r".* should be .* numpy array. Use the 'pd.array' function instead"
+    with pytest.raises(TypeError, match=msg):
+        IntegerArray(values.tolist(), mask)
+
+    with pytest.raises(TypeError, match=msg):
+        IntegerArray(values, mask.tolist())
+
+    with pytest.raises(TypeError, match=msg):
+        IntegerArray(values.astype(float), mask)
+    msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
+    with pytest.raises(TypeError, match=msg):
+        IntegerArray(values)
+
+
+def test_integer_array_constructor_copy():
+    values = np.array([1, 2, 3, 4], dtype="int64")
+    mask = np.array([False, False, False, True], dtype="bool")
+
+    result = IntegerArray(values, mask)
+    assert result._data is values
+    assert result._mask is mask
+
+    result = IntegerArray(values, mask, copy=True)
+    assert result._data is not values
+    assert result._mask is not mask
+
+
+@pytest.mark.parametrize(
+    "a, b",
+    [
+        ([1, None], [1, np.nan]),
+        ([None], [np.nan]),
+        ([None, np.nan], [np.nan, np.nan]),
+        ([np.nan, np.nan], [np.nan, np.nan]),
+    ],
+)
+def test_to_integer_array_none_is_nan(a, b):
+    result = pd.array(a, dtype="Int64")
+    expected = pd.array(b, dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "values",
+    [
+        ["foo", "bar"],
+        "foo",
+        1,
+        1.0,
+        pd.date_range("20130101", periods=2),
+        np.array(["foo"]),
+        [[1, 2], [3, 4]],
+        [np.nan, {"a": 1}],
+    ],
+)
+def test_to_integer_array_error(values):
+    # error in converting existing arrays to IntegerArrays
+    msg = "|".join(
+        [
+            r"cannot be converted to IntegerDtype",
+            r"invalid literal for int\(\) with base 10:",
+            r"values must be a 1D list-like",
+            r"Cannot pass scalar",
+            r"int\(\) argument must be a string",
+        ]
+    )
+    with pytest.raises((ValueError, TypeError), match=msg):
+        pd.array(values, dtype="Int64")
+
+    with pytest.raises((ValueError, TypeError), match=msg):
+        IntegerArray._from_sequence(values)
+
+
+def test_to_integer_array_inferred_dtype(constructor):
+    # if values has dtype -> respect it
+    result = constructor(np.array([1, 2], dtype="int8"))
+    assert result.dtype == Int8Dtype()
+    result = constructor(np.array([1, 2], dtype="int32"))
+    assert result.dtype == Int32Dtype()
+
+    # if values have no dtype -> always int64
+    result = constructor([1, 2])
+    assert result.dtype == Int64Dtype()
+
+
+def test_to_integer_array_dtype_keyword(constructor):
+    result = constructor([1, 2], dtype="Int8")
+    assert result.dtype == Int8Dtype()
+
+    # if values has dtype -> override it
+    result = constructor(np.array([1, 2], dtype="int8"), dtype="Int32")
+    assert result.dtype == Int32Dtype()
+
+
+def test_to_integer_array_float():
+    result = IntegerArray._from_sequence([1.0, 2.0], dtype="Int64")
+    expected = pd.array([1, 2], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
+        IntegerArray._from_sequence([1.5, 2.0], dtype="Int64")
+
+    # for float dtypes, the itemsize is not preserved
+    result = IntegerArray._from_sequence(
+        np.array([1.0, 2.0], dtype="float32"), dtype="Int64"
+    )
+    assert result.dtype == Int64Dtype()
+
+
+def test_to_integer_array_str():
+    result = IntegerArray._from_sequence(["1", "2", None], dtype="Int64")
+    expected = pd.array([1, 2, np.nan], dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    with pytest.raises(
+        ValueError, match=r"invalid literal for int\(\) with base 10: .*"
+    ):
+        IntegerArray._from_sequence(["1", "2", ""], dtype="Int64")
+
+    with pytest.raises(
+        ValueError, match=r"invalid literal for int\(\) with base 10: .*"
+    ):
+        IntegerArray._from_sequence(["1.5", "2.0"], dtype="Int64")
+
+
+@pytest.mark.parametrize(
+    "bool_values, int_values, target_dtype, expected_dtype",
+    [
+        ([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
+        ([False, True], [0, 1], "Int64", Int64Dtype()),
+        ([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
+    ],
+)
+def test_to_integer_array_bool(
+    constructor, bool_values, int_values, target_dtype, expected_dtype
+):
+    result = constructor(bool_values, dtype=target_dtype)
+    assert result.dtype == expected_dtype
+    expected = pd.array(int_values, dtype=target_dtype)
+    tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "values, to_dtype, result_dtype",
+    [
+        (np.array([1], dtype="int64"), None, Int64Dtype),
+        (np.array([1, np.nan]), None, Int64Dtype),
+        (np.array([1, np.nan]), "int8", Int8Dtype),
+    ],
+)
+def test_to_integer_array(values, to_dtype, result_dtype):
+    # convert existing arrays to IntegerArrays
+    result = IntegerArray._from_sequence(values, dtype=to_dtype)
+    assert result.dtype == result_dtype()
+    expected = pd.array(values, dtype=result_dtype())
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_integer_array_from_boolean():
+    # GH31104
+    expected = pd.array(np.array([True, False]), dtype="Int64")
+    result = pd.array(np.array([True, False], dtype=object), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py
ADDED
@@ -0,0 +1,294 @@
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.generic import ABCIndex
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    UInt32Dtype,
+)
+
+
+def test_dtypes(dtype):
+    # smoke tests on auto dtype construction
+
+    if dtype.is_signed_integer:
+        assert np.dtype(dtype.type).kind == "i"
+    else:
+        assert np.dtype(dtype.type).kind == "u"
+    assert dtype.name is not None
+
+
+@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
+def test_preserve_dtypes(op):
+    # for ops that enable (mean would actually work here
+    # but generally it is a float return value)
+    df = pd.DataFrame(
+        {
+            "A": ["a", "b", "b"],
+            "B": [1, None, 3],
+            "C": pd.array([1, None, 3], dtype="Int64"),
+        }
+    )
+
+    # op
+    result = getattr(df.C, op)()
+    if op in {"sum", "prod", "min", "max"}:
+        assert isinstance(result, np.int64)
+    else:
+        assert isinstance(result, int)
+
+    # groupby
+    result = getattr(df.groupby("A"), op)()
+
+    expected = pd.DataFrame(
+        {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")},
+        index=pd.Index(["a", "b"], name="A"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_astype_nansafe():
+    # see gh-22343
+    arr = pd.array([np.nan, 1, 2], dtype="Int8")
+    msg = "cannot convert NA to integer"
+
+    with pytest.raises(ValueError, match=msg):
+        arr.astype("uint32")
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_construct_index(all_data, dropna):
+    # ensure that we do not coerce to different Index dtype or non-index
+
+    all_data = all_data[:10]
+    if dropna:
+        other = np.array(all_data[~all_data.isna()])
+    else:
+        other = all_data
+
+    result = pd.Index(pd.array(other, dtype=all_data.dtype))
+    expected = pd.Index(other, dtype=all_data.dtype)
+    assert all_data.dtype == expected.dtype  # dont coerce to object
+
+    tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_astype_index(all_data, dropna):
+    # as an int/uint index to Index
+
+    all_data = all_data[:10]
+    if dropna:
+        other = all_data[~all_data.isna()]
+    else:
+        other = all_data
+
+    dtype = all_data.dtype
+    idx = pd.Index(np.array(other))
+    assert isinstance(idx, ABCIndex)
+
+    result = idx.astype(dtype)
+    expected = idx.astype(object).astype(dtype)
+    tm.assert_index_equal(result, expected)
+
+
+def test_astype(all_data):
+    all_data = all_data[:10]
+
+    ints = all_data[~all_data.isna()]
+    mixed = all_data
+    dtype = Int8Dtype()
+
+    # coerce to same type - ints
+    s = pd.Series(ints)
+    result = s.astype(all_data.dtype)
+    expected = pd.Series(ints)
+    tm.assert_series_equal(result, expected)
+
+    # coerce to same other - ints
+    s = pd.Series(ints)
+    result = s.astype(dtype)
+    expected = pd.Series(ints, dtype=dtype)
+    tm.assert_series_equal(result, expected)
+
+    # coerce to same numpy_dtype - ints
+    s = pd.Series(ints)
+    result = s.astype(all_data.dtype.numpy_dtype)
+    expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
+    tm.assert_series_equal(result, expected)
+
+    # coerce to same type - mixed
+    s = pd.Series(mixed)
+    result = s.astype(all_data.dtype)
+    expected = pd.Series(mixed)
+    tm.assert_series_equal(result, expected)
+
+    # coerce to same other - mixed
+    s = pd.Series(mixed)
+    result = s.astype(dtype)
+    expected = pd.Series(mixed, dtype=dtype)
+    tm.assert_series_equal(result, expected)
+
+    # coerce to same numpy_dtype - mixed
+    s = pd.Series(mixed)
+    msg = "cannot convert NA to integer"
+    with pytest.raises(ValueError, match=msg):
+        s.astype(all_data.dtype.numpy_dtype)
+
+    # coerce to object
+    s = pd.Series(mixed)
+    result = s.astype("object")
+    expected = pd.Series(np.asarray(mixed, dtype=object))
+    tm.assert_series_equal(result, expected)
+
+
+def test_astype_copy():
+    arr = pd.array([1, 2, 3, None], dtype="Int64")
+    orig = pd.array([1, 2, 3, None], dtype="Int64")
+
+    # copy=True -> ensure both data and mask are actual copies
+    result = arr.astype("Int64", copy=True)
+    assert result is not arr
+    assert not tm.shares_memory(result, arr)
+    result[0] = 10
+    tm.assert_extension_array_equal(arr, orig)
+    result[0] = pd.NA
+    tm.assert_extension_array_equal(arr, orig)
+
+    # copy=False
+    result = arr.astype("Int64", copy=False)
+    assert result is arr
+    assert np.shares_memory(result._data, arr._data)
+    assert np.shares_memory(result._mask, arr._mask)
+    result[0] = 10
+    assert arr[0] == 10
+    result[0] = pd.NA
+    assert arr[0] is pd.NA
+
+    # astype to different dtype -> always needs a copy -> even with copy=False
+    # we need to ensure that also the mask is actually copied
+    arr = pd.array([1, 2, 3, None], dtype="Int64")
+    orig = pd.array([1, 2, 3, None], dtype="Int64")
+
+    result = arr.astype("Int32", copy=False)
+    assert not tm.shares_memory(result, arr)
+    result[0] = 10
+    tm.assert_extension_array_equal(arr, orig)
+    result[0] = pd.NA
+    tm.assert_extension_array_equal(arr, orig)
+
+
+def test_astype_to_larger_numpy():
+    a = pd.array([1, 2], dtype="Int32")
+    result = a.astype("int64")
+    expected = np.array([1, 2], dtype="int64")
+    tm.assert_numpy_array_equal(result, expected)
+
+    a = pd.array([1, 2], dtype="UInt32")
+    result = a.astype("uint64")
+    expected = np.array([1, 2], dtype="uint64")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
+def test_astype_specific_casting(dtype):
+    s = pd.Series([1, 2, 3], dtype="Int64")
+    result = s.astype(dtype)
+    expected = pd.Series([1, 2, 3], dtype=dtype)
+    tm.assert_series_equal(result, expected)
+
+    s = pd.Series([1, 2, 3, None], dtype="Int64")
+    result = s.astype(dtype)
+    expected = pd.Series([1, 2, 3, None], dtype=dtype)
+    tm.assert_series_equal(result, expected)
+
+
+def test_astype_floating():
+    arr = pd.array([1, 2, None], dtype="Int64")
+    result = arr.astype("Float64")
+    expected = pd.array([1.0, 2.0, None], dtype="Float64")
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_astype_dt64():
+    # GH#32435
+    arr = pd.array([1, 2, 3, pd.NA]) * 10**9
+
+    result = arr.astype("datetime64[ns]")
+
+    expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+def test_construct_cast_invalid(dtype):
+    msg = "cannot safely"
+    arr = [1.2, 2.3, 3.7]
+    with pytest.raises(TypeError, match=msg):
+        pd.array(arr, dtype=dtype)
+
+    with pytest.raises(TypeError, match=msg):
+        pd.Series(arr).astype(dtype)
+
+    arr = [1.2, 2.3, 3.7, np.nan]
+    with pytest.raises(TypeError, match=msg):
+        pd.array(arr, dtype=dtype)
+
+    with pytest.raises(TypeError, match=msg):
+        pd.Series(arr).astype(dtype)
+
+
+@pytest.mark.parametrize("in_series", [True, False])
+def test_to_numpy_na_nan(in_series):
+    a = pd.array([0, 1, None], dtype="Int64")
+    if in_series:
+        a = pd.Series(a)
+
+    result = a.to_numpy(dtype="float64", na_value=np.nan)
+    expected = np.array([0.0, 1.0, np.nan], dtype="float64")
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = a.to_numpy(dtype="int64", na_value=-1)
+    expected = np.array([0, 1, -1], dtype="int64")
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = a.to_numpy(dtype="bool", na_value=False)
+    expected = np.array([False, True, False], dtype="bool")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("in_series", [True, False])
+@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
+def test_to_numpy_dtype(dtype, in_series):
+    a = pd.array([0, 1], dtype="Int64")
+    if in_series:
+        a = pd.Series(a)
+
+    result = a.to_numpy(dtype=dtype)
+    expected = np.array([0, 1], dtype=dtype)
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["int64", "bool"])
+def test_to_numpy_na_raises(dtype):
+    a = pd.array([0, 1, None], dtype="Int64")
+    with pytest.raises(ValueError, match=dtype):
+        a.to_numpy(dtype=dtype)
+
+
+def test_astype_str():
+    a = pd.array([1, 2, None], dtype="Int64")
+    expected = np.array(["1", "2", "<NA>"], dtype=f"{tm.ENDIAN}U21")
+
+    tm.assert_numpy_array_equal(a.astype(str), expected)
+    tm.assert_numpy_array_equal(a.astype("str"), expected)
+
+
+def test_astype_boolean():
+    # https://github.com/pandas-dev/pandas/issues/31102
+    a = pd.array([1, 0, -1, 2, None], dtype="Int64")
+    result = a.astype("boolean")
+    expected = pd.array([True, False, True, True, None], dtype="boolean")
+    tm.assert_extension_array_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py
ADDED
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
from pandas.core.arrays import FloatingArray
|
7 |
+
|
8 |
+
|
9 |
+
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
|
10 |
+
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
|
11 |
+
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign:RuntimeWarning")
|
12 |
+
def test_ufuncs_single_int(ufunc):
|
13 |
+
a = pd.array([1, 2, -3, np.nan])
|
14 |
+
result = ufunc(a)
|
15 |
+
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
|
16 |
+
tm.assert_extension_array_equal(result, expected)
|
17 |
+
|
18 |
+
s = pd.Series(a)
|
19 |
+
result = ufunc(s)
|
20 |
+
expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64"))
|
21 |
+
tm.assert_series_equal(result, expected)
|
22 |
+
|
23 |
+
|
24 |
+
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
|
+def test_ufuncs_single_float(ufunc):
+    a = pd.array([1, 2, -3, np.nan])
+    with np.errstate(invalid="ignore"):
+        result = ufunc(a)
+        expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask)
+    tm.assert_extension_array_equal(result, expected)
+
+    s = pd.Series(a)
+    with np.errstate(invalid="ignore"):
+        result = ufunc(s)
+    expected = pd.Series(expected)
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
+def test_ufuncs_binary_int(ufunc):
+    # two IntegerArrays
+    a = pd.array([1, 2, -3, np.nan])
+    result = ufunc(a, a)
+    expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    # IntegerArray with numpy array
+    arr = np.array([1, 2, 3, 4])
+    result = ufunc(a, arr)
+    expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = ufunc(arr, a)
+    expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    # IntegerArray with scalar
+    result = ufunc(a, 1)
+    expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+    result = ufunc(1, a)
+    expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64")
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_ufunc_binary_output():
+    a = pd.array([1, 2, np.nan])
+    result = np.modf(a)
+    expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float"))
+    expected = (pd.array(expected[0]), pd.array(expected[1]))
+
+    assert isinstance(result, tuple)
+    assert len(result) == 2
+
+    for x, y in zip(result, expected):
+        tm.assert_extension_array_equal(x, y)
+
+
+@pytest.mark.parametrize("values", [[0, 1], [0, None]])
+def test_ufunc_reduce_raises(values):
+    arr = pd.array(values)
+
+    res = np.add.reduce(arr)
+    expected = arr.sum(skipna=False)
+    tm.assert_almost_equal(res, expected)
+
+
+@pytest.mark.parametrize(
+    "pandasmethname, kwargs",
+    [
+        ("var", {"ddof": 0}),
+        ("var", {"ddof": 1}),
+        ("std", {"ddof": 0}),
+        ("std", {"ddof": 1}),
+        ("kurtosis", {}),
+        ("skew", {}),
+        ("sem", {}),
+    ],
+)
+def test_stat_method(pandasmethname, kwargs):
+    s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64")
+    pandasmeth = getattr(s, pandasmethname)
+    result = pandasmeth(**kwargs)
+    s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64")
+    pandasmeth = getattr(s2, pandasmethname)
+    expected = pandasmeth(**kwargs)
+    assert expected == result
+
+
+def test_value_counts_na():
+    arr = pd.array([1, 2, 1, pd.NA], dtype="Int64")
+    result = arr.value_counts(dropna=False)
+    ex_index = pd.Index([1, 2, pd.NA], dtype="Int64")
+    assert ex_index.dtype == "Int64"
+    expected = pd.Series([2, 1, 1], index=ex_index, dtype="Int64", name="count")
+    tm.assert_series_equal(result, expected)
+
+    result = arr.value_counts(dropna=True)
+    expected = pd.Series([2, 1], index=arr[:2], dtype="Int64", name="count")
+    assert expected.index.dtype == arr.dtype
+    tm.assert_series_equal(result, expected)
+
+
+def test_value_counts_empty():
+    # https://github.com/pandas-dev/pandas/issues/33317
+    ser = pd.Series([], dtype="Int64")
+    result = ser.value_counts()
+    idx = pd.Index([], dtype=ser.dtype)
+    assert idx.dtype == ser.dtype
+    expected = pd.Series([], index=idx, dtype="Int64", name="count")
+    tm.assert_series_equal(result, expected)
+
+
+def test_value_counts_with_normalize():
+    # GH 33172
+    ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64")
+    result = ser.value_counts(normalize=True)
+    expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3
+    assert expected.index.dtype == ser.dtype
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("min_count", [0, 4])
+def test_integer_array_sum(skipna, min_count, any_int_ea_dtype):
+    dtype = any_int_ea_dtype
+    arr = pd.array([1, 2, 3, None], dtype=dtype)
+    result = arr.sum(skipna=skipna, min_count=min_count)
+    if skipna and min_count == 0:
+        assert result == 6
+    else:
+        assert result is pd.NA
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("method", ["min", "max"])
+def test_integer_array_min_max(skipna, method, any_int_ea_dtype):
+    dtype = any_int_ea_dtype
+    arr = pd.array([0, 1, None], dtype=dtype)
+    func = getattr(arr, method)
+    result = func(skipna=skipna)
+    if skipna:
+        assert result == (0 if method == "min" else 1)
+    else:
+        assert result is pd.NA
+
+
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("min_count", [0, 9])
+def test_integer_array_prod(skipna, min_count, any_int_ea_dtype):
+    dtype = any_int_ea_dtype
+    arr = pd.array([1, 2, None], dtype=dtype)
+    result = arr.prod(skipna=skipna, min_count=min_count)
+    if skipna and min_count == 0:
+        assert result == 2
+    else:
+        assert result is pd.NA
+
+
+@pytest.mark.parametrize(
+    "values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)]
+)
+def test_integer_array_numpy_sum(values, expected):
+    arr = pd.array(values, dtype="Int64")
+    result = np.sum(arr)
+    assert result == expected
+
+
+@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"])
+def test_dataframe_reductions(op):
+    # https://github.com/pandas-dev/pandas/pull/32867
+    # ensure the integers are not cast to float during reductions
+    df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
+    result = df.max()
+    assert isinstance(result["a"], np.int64)
+
+
+# TODO(jreback) - these need testing / are broken
+
+# shift
+
+# set_index (destroys type)
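
The tests above pivot on one behavior: pandas' masked integer arrays propagate pd.NA through NumPy ufuncs and honor skipna/min_count in reductions. A minimal standalone sketch (the variable names are illustrative, not from the file):

    import numpy as np
    import pandas as pd

    arr = pd.array([1, 2, None], dtype="Int64")   # IntegerArray with a mask
    print(np.exp(arr))              # Float64 result; the masked slot stays <NA>
    print(arr.sum())                # 3 -- NA is skipped by default
    print(arr.sum(skipna=False))    # <NA>
    print(arr.sum(min_count=4))     # <NA> -- only 3 non-NA values present
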
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py
ADDED
@@ -0,0 +1,125 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Series,
+    array,
+)
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+    "op, expected",
+    [
+        ["sum", np.int64(3)],
+        ["prod", np.int64(2)],
+        ["min", np.int64(1)],
+        ["max", np.int64(2)],
+        ["mean", np.float64(1.5)],
+        ["median", np.float64(1.5)],
+        ["var", np.float64(0.5)],
+        ["std", np.float64(0.5**0.5)],
+        ["skew", pd.NA],
+        ["kurt", pd.NA],
+        ["any", True],
+        ["all", True],
+    ],
+)
+def test_series_reductions(op, expected):
+    ser = Series([1, 2], dtype="Int64")
+    result = getattr(ser, op)()
+    tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, expected",
+    [
+        ["sum", Series([3], index=["a"], dtype="Int64")],
+        ["prod", Series([2], index=["a"], dtype="Int64")],
+        ["min", Series([1], index=["a"], dtype="Int64")],
+        ["max", Series([2], index=["a"], dtype="Int64")],
+        ["mean", Series([1.5], index=["a"], dtype="Float64")],
+        ["median", Series([1.5], index=["a"], dtype="Float64")],
+        ["var", Series([0.5], index=["a"], dtype="Float64")],
+        ["std", Series([0.5**0.5], index=["a"], dtype="Float64")],
+        ["skew", Series([pd.NA], index=["a"], dtype="Float64")],
+        ["kurt", Series([pd.NA], index=["a"], dtype="Float64")],
+        ["any", Series([True], index=["a"], dtype="boolean")],
+        ["all", Series([True], index=["a"], dtype="boolean")],
+    ],
+)
+def test_dataframe_reductions(op, expected):
+    df = DataFrame({"a": array([1, 2], dtype="Int64")})
+    result = getattr(df, op)()
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, expected",
+    [
+        ["sum", array([1, 3], dtype="Int64")],
+        ["prod", array([1, 3], dtype="Int64")],
+        ["min", array([1, 3], dtype="Int64")],
+        ["max", array([1, 3], dtype="Int64")],
+        ["mean", array([1, 3], dtype="Float64")],
+        ["median", array([1, 3], dtype="Float64")],
+        ["var", array([pd.NA], dtype="Float64")],
+        ["std", array([pd.NA], dtype="Float64")],
+        ["skew", array([pd.NA], dtype="Float64")],
+        ["any", array([True, True], dtype="boolean")],
+        ["all", array([True, True], dtype="boolean")],
+    ],
+)
+def test_groupby_reductions(op, expected):
+    df = DataFrame(
+        {
+            "A": ["a", "b", "b"],
+            "B": array([1, None, 3], dtype="Int64"),
+        }
+    )
+    result = getattr(df.groupby("A"), op)()
+    expected = DataFrame(expected, index=pd.Index(["a", "b"], name="A"), columns=["B"])
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "op, expected",
+    [
+        ["sum", Series([4, 4], index=["B", "C"], dtype="Float64")],
+        ["prod", Series([3, 3], index=["B", "C"], dtype="Float64")],
+        ["min", Series([1, 1], index=["B", "C"], dtype="Float64")],
+        ["max", Series([3, 3], index=["B", "C"], dtype="Float64")],
+        ["mean", Series([2, 2], index=["B", "C"], dtype="Float64")],
+        ["median", Series([2, 2], index=["B", "C"], dtype="Float64")],
+        ["var", Series([2, 2], index=["B", "C"], dtype="Float64")],
+        ["std", Series([2**0.5, 2**0.5], index=["B", "C"], dtype="Float64")],
+        ["skew", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")],
+        ["kurt", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")],
+        ["any", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")],
+        ["all", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")],
+    ],
+)
+def test_mixed_reductions(op, expected, using_infer_string):
+    if op in ["any", "all"] and using_infer_string:
+        expected = expected.astype("bool")
+    df = DataFrame(
+        {
+            "A": ["a", "b", "b"],
+            "B": [1, None, 3],
+            "C": array([1, None, 3], dtype="Int64"),
+        }
+    )
+
+    # series
+    result = getattr(df.C, op)()
+    tm.assert_equal(result, expected["C"])
+
+    # frame
+    if op in ["any", "all"]:
+        result = getattr(df, op)()
+    else:
+        result = getattr(df, op)(numeric_only=True)
+    tm.assert_series_equal(result, expected)
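
The reduction expectations above encode the dtype-preservation rules for masked arrays: sum/prod/min/max keep the integer dtype, mean/median/var/std return Float64, and any/all return boolean. A small sketch under those assumptions:

    import pandas as pd
    from pandas import DataFrame, array

    df = DataFrame({"a": array([1, 2], dtype="Int64")})
    print(df.sum().dtype)    # Int64
    print(df.mean().dtype)   # Float64
    print(df.any().dtype)    # boolean
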
venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py
ADDED
@@ -0,0 +1,67 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.integer import (
+    Int8Dtype,
+    Int16Dtype,
+    Int32Dtype,
+    Int64Dtype,
+    UInt8Dtype,
+    UInt16Dtype,
+    UInt32Dtype,
+    UInt64Dtype,
+)
+
+
+def test_dtypes(dtype):
+    # smoke tests on auto dtype construction
+
+    if dtype.is_signed_integer:
+        assert np.dtype(dtype.type).kind == "i"
+    else:
+        assert np.dtype(dtype.type).kind == "u"
+    assert dtype.name is not None
+
+
+@pytest.mark.parametrize(
+    "dtype, expected",
+    [
+        (Int8Dtype(), "Int8Dtype()"),
+        (Int16Dtype(), "Int16Dtype()"),
+        (Int32Dtype(), "Int32Dtype()"),
+        (Int64Dtype(), "Int64Dtype()"),
+        (UInt8Dtype(), "UInt8Dtype()"),
+        (UInt16Dtype(), "UInt16Dtype()"),
+        (UInt32Dtype(), "UInt32Dtype()"),
+        (UInt64Dtype(), "UInt64Dtype()"),
+    ],
+)
+def test_repr_dtype(dtype, expected):
+    assert repr(dtype) == expected
+
+
+def test_repr_array():
+    result = repr(pd.array([1, None, 3]))
+    expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
+    assert result == expected
+
+
+def test_repr_array_long():
+    data = pd.array([1, 2, None] * 1000)
+    expected = (
+        "<IntegerArray>\n"
+        "[   1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,\n"
+        " ...\n"
+        " <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>]\n"
+        "Length: 3000, dtype: Int64"
+    )
+    result = repr(data)
+    assert result == expected
+
+
+def test_frame_repr(data_missing):
+    df = pd.DataFrame({"A": data_missing})
+    result = repr(df)
+    expected = "      A\n0  <NA>\n1     1"
+    assert result == expected
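
The expected strings above are easiest to verify interactively; for example:

    import pandas as pd

    print(repr(pd.array([1, None, 3])))
    # <IntegerArray>
    # [1, <NA>, 3]
    # Length: 3, dtype: Int64
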
venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (5.36 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/generate_legacy_storage_files.cpython-310.pyc
ADDED
Binary file (8.29 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_clipboard.cpython-310.pyc
ADDED
Binary file (12.1 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (22.3 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc
ADDED
Binary file (11 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_feather.cpython-310.pyc
ADDED
Binary file (9.17 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc
ADDED
Binary file (9.35 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gbq.cpython-310.pyc
ADDED
Binary file (904 Bytes)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gcs.cpython-310.pyc
ADDED
Binary file (7 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_html.cpython-310.pyc
ADDED
Binary file (52 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_http_headers.cpython-310.pyc
ADDED
Binary file (4.44 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_orc.cpython-310.pyc
ADDED
Binary file (9.98 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_parquet.cpython-310.pyc
ADDED
Binary file (40.7 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_pickle.cpython-310.pyc
ADDED
Binary file (20.6 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_s3.cpython-310.pyc
ADDED
Binary file (1.27 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc
ADDED
Binary file (4.76 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_sql.cpython-310.pyc
ADDED
Binary file (108 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_stata.cpython-310.pyc
ADDED
Binary file (81.7 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py
ADDED
@@ -0,0 +1,130 @@
+from io import (
+    BytesIO,
+    StringIO,
+)
+
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_compression_roundtrip(compression):
+    df = pd.DataFrame(
+        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
+        index=["A", "B"],
+        columns=["X", "Y", "Z"],
+    )
+
+    with tm.ensure_clean() as path:
+        df.to_json(path, compression=compression)
+        tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
+
+        # explicitly ensure file was compressed.
+        with tm.decompress_file(path, compression) as fh:
+            result = fh.read().decode("utf8")
+            data = StringIO(result)
+        tm.assert_frame_equal(df, pd.read_json(data))
+
+
+def test_read_zipped_json(datapath):
+    uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
+    uncompressed_df = pd.read_json(uncompressed_path)
+
+    compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
+    compressed_df = pd.read_json(compressed_path, compression="zip")
+
+    tm.assert_frame_equal(uncompressed_df, compressed_df)
+
+
+@td.skip_if_not_us_locale
+@pytest.mark.single_cpu
+def test_with_s3_url(compression, s3_public_bucket, s3so):
+    # Bucket created in tests/io/conftest.py
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+
+    with tm.ensure_clean() as path:
+        df.to_json(path, compression=compression)
+        with open(path, "rb") as f:
+            s3_public_bucket.put_object(Key="test-1", Body=f)
+
+    roundtripped_df = pd.read_json(
+        f"s3://{s3_public_bucket.name}/test-1",
+        compression=compression,
+        storage_options=s3so,
+    )
+    tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_lines_with_compression(compression):
+    with tm.ensure_clean() as path:
+        df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+        df.to_json(path, orient="records", lines=True, compression=compression)
+        roundtripped_df = pd.read_json(path, lines=True, compression=compression)
+        tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_chunksize_with_compression(compression):
+    with tm.ensure_clean() as path:
+        df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}'))
+        df.to_json(path, orient="records", lines=True, compression=compression)
+
+        with pd.read_json(
+            path, lines=True, chunksize=1, compression=compression
+        ) as res:
+            roundtripped_df = pd.concat(res)
+        tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_write_unsupported_compression_type():
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+    with tm.ensure_clean() as path:
+        msg = "Unrecognized compression type: unsupported"
+        with pytest.raises(ValueError, match=msg):
+            df.to_json(path, compression="unsupported")
+
+
+def test_read_unsupported_compression_type():
+    with tm.ensure_clean() as path:
+        msg = "Unrecognized compression type: unsupported"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_json(path, compression="unsupported")
+
+
+@pytest.mark.parametrize(
+    "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+@pytest.mark.parametrize("to_infer", [True, False])
+@pytest.mark.parametrize("read_infer", [True, False])
+def test_to_json_compression(
+    compression_only, read_infer, to_infer, compression_to_extension, infer_string
+):
+    with pd.option_context("future.infer_string", infer_string):
+        # see gh-15008
+        compression = compression_only
+
+        # We'll complete file extension subsequently.
+        filename = "test."
+        filename += compression_to_extension[compression]
+
+        df = pd.DataFrame({"A": [1]})
+
+        to_compression = "infer" if to_infer else compression
+        read_compression = "infer" if read_infer else compression
+
+        with tm.ensure_clean(filename) as path:
+            df.to_json(path, compression=to_compression)
+            result = pd.read_json(path, compression=read_compression)
+            tm.assert_frame_equal(result, df)
+
+
+def test_to_json_compression_mode(compression):
+    # GH 39985 (read_json does not support user-provided binary files)
+    expected = pd.DataFrame({"A": [1]})
+
+    with BytesIO() as buffer:
+        expected.to_json(buffer, compression=compression)
+        # df = pd.read_json(buffer, compression=compression)
+        # tm.assert_frame_equal(expected, df)
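
The compression round-trips above rely on extension-based inference: both to_json and read_json default to compression="infer", so a recognized suffix selects the codec automatically. A minimal sketch (the file name is illustrative, not from the file):

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df.to_json("frame.json.gz", orient="records", lines=True)   # gzip inferred
    back = pd.read_json("frame.json.gz", orient="records", lines=True)
    assert df.equals(back)
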
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py
ADDED
@@ -0,0 +1,21 @@
+"""
+Tests for the deprecated keyword arguments for `read_json`.
+"""
+from io import StringIO
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.json import read_json
+
+
+def test_good_kwargs():
+    df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])
+
+    with tm.assert_produces_warning(None):
+        data1 = StringIO(df.to_json(orient="split"))
+        tm.assert_frame_equal(df, read_json(data1, orient="split"))
+        data2 = StringIO(df.to_json(orient="columns"))
+        tm.assert_frame_equal(df, read_json(data2, orient="columns"))
+        data3 = StringIO(df.to_json(orient="index"))
+        tm.assert_frame_equal(df, read_json(data3, orient="index"))
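
For reference, the orients exercised above serialize the same frame into different JSON shapes; "split", for instance, separates columns, index, and data:

    import pandas as pd

    df = pd.DataFrame({"A": [2, 4], "B": [3, 6]})
    print(df.to_json(orient="split"))
    # {"columns":["A","B"],"index":[0,1],"data":[[2,3],[4,6]]}
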
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py
ADDED
@@ -0,0 +1,873 @@
+"""Tests for Table Schema integration."""
+from collections import OrderedDict
+from io import StringIO
+import json
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.dtypes import (
+    CategoricalDtype,
+    DatetimeTZDtype,
+    PeriodDtype,
+)
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.json._table_schema import (
+    as_json_table_type,
+    build_table_schema,
+    convert_json_field_to_pandas_type,
+    convert_pandas_type_to_json_field,
+    set_default_names,
+)
+
+
+@pytest.fixture
+def df_schema():
+    return DataFrame(
+        {
+            "A": [1, 2, 3, 4],
+            "B": ["a", "b", "c", "c"],
+            "C": pd.date_range("2016-01-01", freq="d", periods=4),
+            "D": pd.timedelta_range("1h", periods=4, freq="min"),
+        },
+        index=pd.Index(range(4), name="idx"),
+    )
+
+
+@pytest.fixture
+def df_table():
+    return DataFrame(
+        {
+            "A": [1, 2, 3, 4],
+            "B": ["a", "b", "c", "c"],
+            "C": pd.date_range("2016-01-01", freq="d", periods=4),
+            "D": pd.timedelta_range("1h", periods=4, freq="min"),
+            "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
+            "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
+            "G": [1.0, 2.0, 3, 4.0],
+            "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
+        },
+        index=pd.Index(range(4), name="idx"),
+    )
+
+
+class TestBuildSchema:
+    def test_build_table_schema(self, df_schema, using_infer_string):
+        result = build_table_schema(df_schema, version=False)
+        expected = {
+            "fields": [
+                {"name": "idx", "type": "integer"},
+                {"name": "A", "type": "integer"},
+                {"name": "B", "type": "string"},
+                {"name": "C", "type": "datetime"},
+                {"name": "D", "type": "duration"},
+            ],
+            "primaryKey": ["idx"],
+        }
+        if using_infer_string:
+            expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"}
+        assert result == expected
+        result = build_table_schema(df_schema)
+        assert "pandas_version" in result
+
+    def test_series(self):
+        s = pd.Series([1, 2, 3], name="foo")
+        result = build_table_schema(s, version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "foo", "type": "integer"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+        result = build_table_schema(s)
+        assert "pandas_version" in result
+
+    def test_series_unnamed(self):
+        result = build_table_schema(pd.Series([1, 2, 3]), version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "values", "type": "integer"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+
+    def test_multiindex(self, df_schema, using_infer_string):
+        df = df_schema
+        idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])
+        df.index = idx
+
+        result = build_table_schema(df, version=False)
+        expected = {
+            "fields": [
+                {"name": "level_0", "type": "string"},
+                {"name": "level_1", "type": "integer"},
+                {"name": "A", "type": "integer"},
+                {"name": "B", "type": "string"},
+                {"name": "C", "type": "datetime"},
+                {"name": "D", "type": "duration"},
+            ],
+            "primaryKey": ["level_0", "level_1"],
+        }
+        if using_infer_string:
+            expected["fields"][0] = {
+                "name": "level_0",
+                "type": "any",
+                "extDtype": "string",
+            }
+            expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"}
+        assert result == expected
+
+        df.index.names = ["idx0", None]
+        expected["fields"][0]["name"] = "idx0"
+        expected["primaryKey"] = ["idx0", "level_1"]
+        result = build_table_schema(df, version=False)
+        assert result == expected
+
+
+class TestTableSchemaType:
+    @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64])
+    def test_as_json_table_type_int_data(self, int_type):
+        int_data = [1, 2, 3]
+        assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer"
+
+    @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64])
+    def test_as_json_table_type_float_data(self, float_type):
+        float_data = [1.0, 2.0, 3.0]
+        assert (
+            as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number"
+        )
+
+    @pytest.mark.parametrize("bool_type", [bool, np.bool_])
+    def test_as_json_table_type_bool_data(self, bool_type):
+        bool_data = [True, False]
+        assert (
+            as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean"
+        )
+
+    @pytest.mark.parametrize(
+        "date_data",
+        [
+            pd.to_datetime(["2016"]),
+            pd.to_datetime(["2016"], utc=True),
+            pd.Series(pd.to_datetime(["2016"])),
+            pd.Series(pd.to_datetime(["2016"], utc=True)),
+            pd.period_range("2016", freq="Y", periods=3),
+        ],
+    )
+    def test_as_json_table_type_date_data(self, date_data):
+        assert as_json_table_type(date_data.dtype) == "datetime"
+
+    @pytest.mark.parametrize(
+        "str_data",
+        [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)],
+    )
+    def test_as_json_table_type_string_data(self, str_data):
+        assert as_json_table_type(str_data.dtype) == "string"
+
+    @pytest.mark.parametrize(
+        "cat_data",
+        [
+            pd.Categorical(["a"]),
+            pd.Categorical([1]),
+            pd.Series(pd.Categorical([1])),
+            pd.CategoricalIndex([1]),
+            pd.Categorical([1]),
+        ],
+    )
+    def test_as_json_table_type_categorical_data(self, cat_data):
+        assert as_json_table_type(cat_data.dtype) == "any"
+
+    # ------
+    # dtypes
+    # ------
+    @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64])
+    def test_as_json_table_type_int_dtypes(self, int_dtype):
+        assert as_json_table_type(int_dtype) == "integer"
+
+    @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64])
+    def test_as_json_table_type_float_dtypes(self, float_dtype):
+        assert as_json_table_type(float_dtype) == "number"
+
+    @pytest.mark.parametrize("bool_dtype", [bool, np.bool_])
+    def test_as_json_table_type_bool_dtypes(self, bool_dtype):
+        assert as_json_table_type(bool_dtype) == "boolean"
+
+    @pytest.mark.parametrize(
+        "date_dtype",
+        [
+            np.dtype("<M8[ns]"),
+            PeriodDtype("D"),
+            DatetimeTZDtype("ns", "US/Central"),
+        ],
+    )
+    def test_as_json_table_type_date_dtypes(self, date_dtype):
+        # TODO: datedate.date? datetime.time?
+        assert as_json_table_type(date_dtype) == "datetime"
+
+    @pytest.mark.parametrize("td_dtype", [np.dtype("<m8[ns]")])
+    def test_as_json_table_type_timedelta_dtypes(self, td_dtype):
+        assert as_json_table_type(td_dtype) == "duration"
+
+    @pytest.mark.parametrize("str_dtype", [object])  # TODO(GH#14904) flesh out dtypes?
+    def test_as_json_table_type_string_dtypes(self, str_dtype):
+        assert as_json_table_type(str_dtype) == "string"
+
+    def test_as_json_table_type_categorical_dtypes(self):
+        assert as_json_table_type(pd.Categorical(["a"]).dtype) == "any"
+        assert as_json_table_type(CategoricalDtype()) == "any"
+
+
+class TestTableOrient:
+    def test_build_series(self):
+        s = pd.Series([1, 2], name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [{"name": "id", "type": "integer"}, {"name": "a", "type": "integer"}]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                (
+                    "data",
+                    [
+                        OrderedDict([("id", 0), ("a", 1)]),
+                        OrderedDict([("id", 1), ("a", 2)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_read_json_from_to_json_results(self):
+        # GH32383
+        df = DataFrame(
+            {
+                "_id": {"row_0": 0},
+                "category": {"row_0": "Goods"},
+                "recommender_id": {"row_0": 3},
+                "recommender_name_jp": {"row_0": "浦田"},
+                "recommender_name_en": {"row_0": "Urata"},
+                "name_jp": {"row_0": "博多人形(松尾吉将まつお よしまさ)"},
+                "name_en": {"row_0": "Hakata Dolls Matsuo"},
+            }
+        )
+
+        result1 = pd.read_json(StringIO(df.to_json()))
+        result2 = DataFrame.from_dict(json.loads(df.to_json()))
+        tm.assert_frame_equal(result1, df)
+        tm.assert_frame_equal(result2, df)
+
+    def test_to_json(self, df_table, using_infer_string):
+        df = df_table
+        df.index.name = "idx"
+        result = df.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "idx", "type": "integer"},
+            {"name": "A", "type": "integer"},
+            {"name": "B", "type": "string"},
+            {"name": "C", "type": "datetime"},
+            {"name": "D", "type": "duration"},
+            {
+                "constraints": {"enum": ["a", "b", "c"]},
+                "name": "E",
+                "ordered": False,
+                "type": "any",
+            },
+            {
+                "constraints": {"enum": ["a", "b", "c"]},
+                "name": "F",
+                "ordered": True,
+                "type": "any",
+            },
+            {"name": "G", "type": "number"},
+            {"name": "H", "type": "datetime", "tz": "US/Central"},
+        ]
+
+        if using_infer_string:
+            fields[2] = {"name": "B", "type": "any", "extDtype": "string"}
+
+        schema = {"fields": fields, "primaryKey": ["idx"]}
+        data = [
+            OrderedDict(
+                [
+                    ("idx", 0),
+                    ("A", 1),
+                    ("B", "a"),
+                    ("C", "2016-01-01T00:00:00.000"),
+                    ("D", "P0DT1H0M0S"),
+                    ("E", "a"),
+                    ("F", "a"),
+                    ("G", 1.0),
+                    ("H", "2016-01-01T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 1),
+                    ("A", 2),
+                    ("B", "b"),
+                    ("C", "2016-01-02T00:00:00.000"),
+                    ("D", "P0DT1H1M0S"),
+                    ("E", "b"),
+                    ("F", "b"),
+                    ("G", 2.0),
+                    ("H", "2016-01-02T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 2),
+                    ("A", 3),
+                    ("B", "c"),
+                    ("C", "2016-01-03T00:00:00.000"),
+                    ("D", "P0DT1H2M0S"),
+                    ("E", "c"),
+                    ("F", "c"),
+                    ("G", 3.0),
+                    ("H", "2016-01-03T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 3),
+                    ("A", 4),
+                    ("B", "c"),
+                    ("C", "2016-01-04T00:00:00.000"),
+                    ("D", "P0DT1H3M0S"),
+                    ("E", "c"),
+                    ("F", "c"),
+                    ("G", 4.0),
+                    ("H", "2016-01-04T06:00:00.000Z"),
+                ]
+            ),
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_to_json_float_index(self):
+        data = pd.Series(1, index=[1.0, 2.0])
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        expected = OrderedDict(
+            [
+                (
+                    "schema",
+                    {
+                        "fields": [
+                            {"name": "index", "type": "number"},
+                            {"name": "values", "type": "integer"},
+                        ],
+                        "primaryKey": ["index"],
+                    },
+                ),
+                (
+                    "data",
+                    [
+                        OrderedDict([("index", 1.0), ("values", 1)]),
+                        OrderedDict([("index", 2.0), ("values", 1)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_to_json_period_index(self):
+        idx = pd.period_range("2016", freq="Q-JAN", periods=2)
+        data = pd.Series(1, idx)
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"freq": "QE-JAN", "name": "index", "type": "datetime"},
+            {"name": "values", "type": "integer"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["index"]}
+        data = [
+            OrderedDict([("index", "2015-11-01T00:00:00.000"), ("values", 1)]),
+            OrderedDict([("index", "2016-02-01T00:00:00.000"), ("values", 1)]),
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_to_json_categorical_index(self):
+        data = pd.Series(1, pd.CategoricalIndex(["a", "b"]))
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        expected = OrderedDict(
+            [
+                (
+                    "schema",
+                    {
+                        "fields": [
+                            {
+                                "name": "index",
+                                "type": "any",
+                                "constraints": {"enum": ["a", "b"]},
+                                "ordered": False,
+                            },
+                            {"name": "values", "type": "integer"},
+                        ],
+                        "primaryKey": ["index"],
+                    },
+                ),
+                (
+                    "data",
+                    [
+                        OrderedDict([("index", "a"), ("values", 1)]),
+                        OrderedDict([("index", "b"), ("values", 1)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_date_format_raises(self, df_table):
+        msg = (
+            "Trying to write with `orient='table'` and `date_format='epoch'`. Table "
+            "Schema requires dates to be formatted with `date_format='iso'`"
+        )
+        with pytest.raises(ValueError, match=msg):
+            df_table.to_json(orient="table", date_format="epoch")
+
+        # others work
+        df_table.to_json(orient="table", date_format="iso")
+        df_table.to_json(orient="table")
+
+    def test_convert_pandas_type_to_json_field_int(self, index_or_series):
+        kind = index_or_series
+        data = [1, 2, 3]
+        result = convert_pandas_type_to_json_field(kind(data, name="name"))
+        expected = {"name": "name", "type": "integer"}
+        assert result == expected
+
+    def test_convert_pandas_type_to_json_field_float(self, index_or_series):
+        kind = index_or_series
+        data = [1.0, 2.0, 3.0]
+        result = convert_pandas_type_to_json_field(kind(data, name="name"))
+        expected = {"name": "name", "type": "number"}
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "dt_args,extra_exp", [({}, {}), ({"utc": True}, {"tz": "UTC"})]
+    )
+    @pytest.mark.parametrize("wrapper", [None, pd.Series])
+    def test_convert_pandas_type_to_json_field_datetime(
+        self, dt_args, extra_exp, wrapper
+    ):
+        data = [1.0, 2.0, 3.0]
+        data = pd.to_datetime(data, **dt_args)
+        if wrapper is pd.Series:
+            data = pd.Series(data, name="values")
+        result = convert_pandas_type_to_json_field(data)
+        expected = {"name": "values", "type": "datetime"}
+        expected.update(extra_exp)
+        assert result == expected
+
+    def test_convert_pandas_type_to_json_period_range(self):
+        arr = pd.period_range("2016", freq="Y-DEC", periods=4)
+        result = convert_pandas_type_to_json_field(arr)
+        expected = {"name": "values", "type": "datetime", "freq": "YE-DEC"}
+        assert result == expected
+
+    @pytest.mark.parametrize("kind", [pd.Categorical, pd.CategoricalIndex])
+    @pytest.mark.parametrize("ordered", [True, False])
+    def test_convert_pandas_type_to_json_field_categorical(self, kind, ordered):
+        data = ["a", "b", "c"]
+        if kind is pd.Categorical:
+            arr = pd.Series(kind(data, ordered=ordered), name="cats")
+        elif kind is pd.CategoricalIndex:
+            arr = kind(data, ordered=ordered, name="cats")
+
+        result = convert_pandas_type_to_json_field(arr)
+        expected = {
+            "name": "cats",
+            "type": "any",
+            "constraints": {"enum": data},
+            "ordered": ordered,
+        }
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "inp,exp",
+        [
+            ({"type": "integer"}, "int64"),
+            ({"type": "number"}, "float64"),
+            ({"type": "boolean"}, "bool"),
+            ({"type": "duration"}, "timedelta64"),
+            ({"type": "datetime"}, "datetime64[ns]"),
+            ({"type": "datetime", "tz": "US/Hawaii"}, "datetime64[ns, US/Hawaii]"),
+            ({"type": "any"}, "object"),
+            (
+                {
+                    "type": "any",
+                    "constraints": {"enum": ["a", "b", "c"]},
+                    "ordered": False,
+                },
+                CategoricalDtype(categories=["a", "b", "c"], ordered=False),
+            ),
+            (
+                {
+                    "type": "any",
+                    "constraints": {"enum": ["a", "b", "c"]},
+                    "ordered": True,
+                },
+                CategoricalDtype(categories=["a", "b", "c"], ordered=True),
+            ),
+            ({"type": "string"}, "object"),
+        ],
+    )
+    def test_convert_json_field_to_pandas_type(self, inp, exp):
+        field = {"name": "foo"}
+        field.update(inp)
+        assert convert_json_field_to_pandas_type(field) == exp
+
+    @pytest.mark.parametrize("inp", ["geopoint", "geojson", "fake_type"])
+    def test_convert_json_field_to_pandas_type_raises(self, inp):
+        field = {"type": inp}
+        with pytest.raises(
+            ValueError, match=f"Unsupported or invalid field type: {inp}"
+        ):
+            convert_json_field_to_pandas_type(field)
+
+    def test_categorical(self):
+        s = pd.Series(pd.Categorical(["a", "b", "a"]))
+        s.index.name = "idx"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "idx", "type": "integer"},
+            {
+                "constraints": {"enum": ["a", "b"]},
+                "name": "values",
+                "ordered": False,
+                "type": "any",
+            },
+        ]
+
+        expected = OrderedDict(
+            [
+                ("schema", {"fields": fields, "primaryKey": ["idx"]}),
+                (
+                    "data",
+                    [
+                        OrderedDict([("idx", 0), ("values", "a")]),
+                        OrderedDict([("idx", 1), ("values", "b")]),
+                        OrderedDict([("idx", 2), ("values", "a")]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "idx,nm,prop",
+        [
+            (pd.Index([1]), "index", "name"),
+            (pd.Index([1], name="myname"), "myname", "name"),
+            (
+                pd.MultiIndex.from_product([("a", "b"), ("c", "d")]),
+                ["level_0", "level_1"],
+                "names",
+            ),
+            (
+                pd.MultiIndex.from_product(
+                    [("a", "b"), ("c", "d")], names=["n1", "n2"]
+                ),
+                ["n1", "n2"],
+                "names",
+            ),
+            (
+                pd.MultiIndex.from_product(
+                    [("a", "b"), ("c", "d")], names=["n1", None]
+                ),
+                ["n1", "level_1"],
+                "names",
+            ),
+        ],
+    )
+    def test_set_names_unset(self, idx, nm, prop):
+        data = pd.Series(1, idx)
+        result = set_default_names(data)
+        assert getattr(result.index, prop) == nm
+
+    @pytest.mark.parametrize(
+        "idx",
+        [
+            pd.Index([], name="index"),
+            pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("level_0", "level_1")),
+            pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("foo", "level_1")),
+        ],
+    )
+    def test_warns_non_roundtrippable_names(self, idx):
+        # GH 19130
+        df = DataFrame(index=idx)
+        df.index.name = "index"
+        with tm.assert_produces_warning():
+            set_default_names(df)
+
+    def test_timestamp_in_columns(self):
+        df = DataFrame(
+            [[1, 2]], columns=[pd.Timestamp("2016"), pd.Timedelta(10, unit="s")]
+        )
+        result = df.to_json(orient="table")
+        js = json.loads(result)
+        assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000"
+        assert js["schema"]["fields"][2]["name"] == "P0DT0H0M10S"
+
+    @pytest.mark.parametrize(
+        "case",
+        [
+            pd.Series([1], index=pd.Index([1], name="a"), name="a"),
+            DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
+            DataFrame(
+                {"A": [1]},
+                index=pd.MultiIndex.from_arrays([["a"], [1]], names=["A", "a"]),
+            ),
+        ],
+    )
+    def test_overlapping_names(self, case):
+        with pytest.raises(ValueError, match="Overlapping"):
+            case.to_json(orient="table")
+
+    def test_mi_falsey_name(self):
+        # GH 16203
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((4, 4)),
+            index=pd.MultiIndex.from_product([("A", "B"), ("a", "b")]),
+        )
+        result = [x["name"] for x in build_table_schema(df)["fields"]]
+        assert result == ["level_0", "level_1", 0, 1, 2, 3]
+
+
+class TestTableOrientReader:
+    @pytest.mark.parametrize(
+        "index_nm",
+        [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"ints": [1, 2, 3, 4]},
+            {"objects": ["a", "b", "c", "d"]},
+            {"objects": ["1", "2", "3", "4"]},
+            {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},
+            {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
+            {
+                "ordered_cats": pd.Series(
+                    pd.Categorical(["a", "b", "c", "c"], ordered=True)
+                )
+            },
+            {"floats": [1.0, 2.0, 3.0, 4.0]},
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"bools": [True, False, False, True]},
+            {
+                "timezones": pd.date_range(
+                    "2016-01-01", freq="d", periods=4, tz="US/Central"
+                )  # added in # GH 35973
+            },
+        ],
+    )
+    def test_read_json_table_orient(self, index_nm, vals, recwarn):
+        df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize("index_nm", [None, "idx", "index"])
+    @pytest.mark.parametrize(
+        "vals",
+        [{"timedeltas": pd.timedelta_range("1h", periods=4, freq="min")}],
+    )
+    def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
+        df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+        out = df.to_json(orient="table")
+        with pytest.raises(NotImplementedError, match="can not yet read "):
+            pd.read_json(out, orient="table")
+
+    @pytest.mark.parametrize(
+        "index_nm",
+        [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"ints": [1, 2, 3, 4]},
+            {"objects": ["a", "b", "c", "d"]},
+            {"objects": ["1", "2", "3", "4"]},
+            {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},
+            {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
+            {
+                "ordered_cats": pd.Series(
+                    pd.Categorical(["a", "b", "c", "c"], ordered=True)
+                )
+            },
+            {"floats": [1.0, 2.0, 3.0, 4.0]},
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"bools": [True, False, False, True]},
+            {
+                "timezones": pd.date_range(
+                    "2016-01-01", freq="d", periods=4, tz="US/Central"
+                )  # added in # GH 35973
+            },
+        ],
+    )
+    def test_read_json_table_period_orient(self, index_nm, vals, recwarn):
+        df = DataFrame(
+            vals,
+            index=pd.Index(
+                (pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm
+            ),
+        )
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize(
+        "idx",
+        [
+            pd.Index(range(4)),
+            pd.date_range(
+                "2020-08-30",
+                freq="d",
+                periods=4,
+            )._with_freq(None),
+            pd.date_range(
+                "2020-08-30", freq="d", periods=4, tz="US/Central"
+            )._with_freq(None),
+            pd.MultiIndex.from_product(
+                [
+                    pd.date_range("2020-08-30", freq="d", periods=2, tz="US/Central"),
+                    ["x", "y"],
+                ],
+            ),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"dates": pd.date_range("2020-08-30", freq="d", periods=4)},
+            {
+                "timezones": pd.date_range(
+                    "2020-08-30", freq="d", periods=4, tz="Europe/London"
+                )
+            },
+        ],
+    )
+    def test_read_json_table_timezones_orient(self, idx, vals, recwarn):
+        # GH 35973
+        df = DataFrame(vals, index=idx)
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    def test_comprehensive(self):
+        df = DataFrame(
+            {
+                "A": [1, 2, 3, 4],
+                "B": ["a", "b", "c", "c"],
+                "C": pd.date_range("2016-01-01", freq="d", periods=4),
+                # 'D': pd.timedelta_range('1h', periods=4, freq='min'),
+                "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
+                "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
+                "G": [1.1, 2.2, 3.3, 4.4],
+                "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
+                "I": [True, False, False, True],
+            },
+            index=pd.Index(range(4), name="idx"),
+        )
+
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize(
+        "index_names",
+        [[None, None], ["foo", "bar"], ["foo", None], [None, "foo"], ["index", "foo"]],
+    )
+    def test_multiindex(self, index_names):
+        # GH 18912
+        df = DataFrame(
+            [["Arr", "alpha", [1, 2, 3, 4]], ["Bee", "Beta", [10, 20, 30, 40]]],
+            index=[["A", "B"], ["Null", "Eins"]],
+            columns=["Aussprache", "Griechisch", "Args"],
+        )
+        df.index.names = index_names
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    def test_empty_frame_roundtrip(self):
+        # GH 21287
+        df = DataFrame(columns=["a", "b", "c"])
+        expected = df.copy()
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(expected, result)
+
+    def test_read_json_orient_table_old_schema_version(self):
+        df_json = """
+        {
+            "schema":{
+                "fields":[
+                    {"name":"index","type":"integer"},
+                    {"name":"a","type":"string"}
+                ],
+                "primaryKey":["index"],
+                "pandas_version":"0.20.0"
+            },
+            "data":[
+                {"index":0,"a":1},
+                {"index":1,"a":2.0},
+                {"index":2,"a":"s"}
+            ]
+        }
+        """
+        expected = DataFrame({"a": [1, 2.0, "s"]})
+        result = pd.read_json(StringIO(df_json), orient="table")
+        tm.assert_frame_equal(expected, result)
+
+    @pytest.mark.parametrize("freq", ["M", "2M", "Q", "2Q", "Y", "2Y"])
+    def test_read_json_table_orient_period_depr_freq(self, freq, recwarn):
+        # GH#9586
+        df = DataFrame(
+            {"ints": [1, 2]},
+            index=pd.PeriodIndex(["2020-01", "2021-06"], freq=freq),
+        )
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
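
As a quick orientation to the Table Schema functions tested above, build_table_schema maps each column (and the index) to a typed field and records the primary key; a minimal sketch:

    import pandas as pd
    from pandas.io.json._table_schema import build_table_schema

    df = pd.DataFrame({"A": [1, 2]}, index=pd.Index([0, 1], name="idx"))
    print(build_table_schema(df, version=False))
    # {'fields': [{'name': 'idx', 'type': 'integer'},
    #             {'name': 'A', 'type': 'integer'}],
    #  'primaryKey': ['idx']}
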
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py
ADDED
@@ -0,0 +1,543 @@
+from collections.abc import Iterator
+from io import StringIO
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    read_json,
+)
+import pandas._testing as tm
+
+from pandas.io.json._json import JsonReader
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@pytest.fixture
+def lines_json_df():
+    df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+    return df.to_json(lines=True, orient="records")
+
+
+@pytest.fixture(params=["ujson", "pyarrow"])
+def engine(request):
+    if request.param == "pyarrow":
+        pytest.importorskip("pyarrow.json")
+    return request.param
+
+
+def test_read_jsonl():
+    # GH9180
+    result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
+    expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_jsonl_engine_pyarrow(datapath, engine):
+    result = read_json(
+        datapath("io", "json", "data", "line_delimited.json"),
+        lines=True,
+        engine=engine,
+    )
+    expected = DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_datetime(request, engine):
+    # GH33787
+    if engine == "pyarrow":
+        # GH 48893
+        reason = "Pyarrow only supports a file path as an input and line delimited json"
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    df = DataFrame(
+        [([1, 2], ["2020-03-05", "2020-04-08T09:58:49+00:00"], "hector")],
+        columns=["accounts", "date", "name"],
+    )
+    json_line = df.to_json(lines=True, orient="records")
+
+    if engine == "pyarrow":
+        result = read_json(StringIO(json_line), engine=engine)
+    else:
+        result = read_json(StringIO(json_line), engine=engine)
+    expected = DataFrame(
+        [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]],
+        columns=["accounts", "date", "name"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_jsonl_unicode_chars():
+    # GH15132: non-ascii unicode characters
+    # \u201d == RIGHT DOUBLE QUOTATION MARK
+
+    # simulate file handle
+    json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+    json = StringIO(json)
+    result = read_json(json, lines=True)
+    expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+    tm.assert_frame_equal(result, expected)
+
+    # simulate string
+    json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+    result = read_json(StringIO(json), lines=True)
+    expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_to_jsonl():
+    # GH9180
+    df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+    result = df.to_json(orient="records", lines=True)
+    expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
+    assert result == expected
+
+    df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
+    result = df.to_json(orient="records", lines=True)
+    expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
+    assert result == expected
+    tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+    # GH15096: escaped characters in columns and data
+    df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
+    result = df.to_json(orient="records", lines=True)
+    expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
+    assert result == expected
+    tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+
+def test_to_jsonl_count_new_lines():
+    # GH36888
+    df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+    actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n")
+    expected_new_lines_count = 2
+    assert actual_new_lines_count == expected_new_lines_count
+
+
+@pytest.mark.parametrize("chunksize", [1, 1.0])
+def test_readjson_chunks(request, lines_json_df, chunksize, engine):
+    # Basic test that read_json(chunks=True) gives the same result as
+    # read_json(chunks=False)
+    # GH17048: memory usage when lines=True
+
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    unchunked = read_json(StringIO(lines_json_df), lines=True)
+    with read_json(
+        StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
+    ) as reader:
+        chunked = pd.concat(reader)
+
+    tm.assert_frame_equal(chunked, unchunked)
+
+
+def test_readjson_chunksize_requires_lines(lines_json_df, engine):
+    msg = "chunksize can only be passed if lines=True"
+    with pytest.raises(ValueError, match=msg):
+        with read_json(
+            StringIO(lines_json_df), lines=False, chunksize=2, engine=engine
+        ) as _:
+            pass
+
+
+def test_readjson_chunks_series(request, engine):
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason))
+
+    # Test reading line-format JSON to Series with chunksize param
+    s = pd.Series({"A": 1, "B": 2})
+
+    strio = StringIO(s.to_json(lines=True, orient="records"))
+    unchunked = read_json(strio, lines=True, typ="Series", engine=engine)
+
+    strio = StringIO(s.to_json(lines=True, orient="records"))
+    with read_json(
+        strio, lines=True, typ="Series", chunksize=1, engine=engine
+    ) as reader:
+        chunked = pd.concat(reader)
+
+    tm.assert_series_equal(chunked, unchunked)
+
+
+def test_readjson_each_chunk(request, lines_json_df, engine):
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    # Other tests check that the final result of read_json(chunksize=True)
+    # is correct. This checks the intermediate chunks.
+    with read_json(
+        StringIO(lines_json_df), lines=True, chunksize=2, engine=engine
+    ) as reader:
+        chunks = list(reader)
+    assert chunks[0].shape == (2, 2)
+    assert chunks[1].shape == (1, 2)
+
+
+def test_readjson_chunks_from_file(request, engine):
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    with tm.ensure_clean("test.json") as path:
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+        df.to_json(path, lines=True, orient="records")
+        with read_json(path, lines=True, chunksize=1, engine=engine) as reader:
+            chunked = pd.concat(reader)
+        unchunked = read_json(path, lines=True, engine=engine)
+        tm.assert_frame_equal(unchunked, chunked)
+
+
+@pytest.mark.parametrize("chunksize", [None, 1])
+def test_readjson_chunks_closes(chunksize):
+    with tm.ensure_clean("test.json") as path:
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+        df.to_json(path, lines=True, orient="records")
+        reader = JsonReader(
+            path,
+            orient=None,
+            typ="frame",
+            dtype=True,
+            convert_axes=True,
+            convert_dates=True,
+            keep_default_dates=True,
+            precise_float=False,
+            date_unit=None,
+            encoding=None,
+            lines=True,
+            chunksize=chunksize,
+            compression=None,
+            nrows=None,
+        )
+        with reader:
+            reader.read()
+        assert (
+            reader.handles.handle.closed
+        ), f"didn't close stream with chunksize = {chunksize}"
+
+
+@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
+def test_readjson_invalid_chunksize(lines_json_df, chunksize, engine):
+    msg = r"'chunksize' must be an integer >=1"
+
+    with pytest.raises(ValueError, match=msg):
+        with read_json(
+            StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
+        ) as _:
+            pass
+
+
+@pytest.mark.parametrize("chunksize", [None, 1, 2])
+def test_readjson_chunks_multiple_empty_lines(chunksize):
+    j = """
+
+    {"A":1,"B":4}
+
+
+
+    {"A":2,"B":5}
+
+
+
+
+
+
+
+    {"A":3,"B":6}
+    """
+    orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+    test = read_json(StringIO(j), lines=True, chunksize=chunksize)
+    if chunksize is not None:
+        with test:
+            test = pd.concat(test)
+    tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")
+
+
+def test_readjson_unicode(request, monkeypatch, engine):
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    with tm.ensure_clean("test.json") as path:
+        monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949")
+        with open(path, "w", encoding="utf-8") as f:
+            f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')
+
+        result = read_json(path, engine=engine)
+        expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [1, 2])
+def test_readjson_nrows(nrows, engine):
+    # GH 33916
+    # Test reading line-format JSON to Series with nrows param
+    jsonl = """{"a": 1, "b": 2}
+        {"a": 3, "b": 4}
+        {"a": 5, "b": 6}
+        {"a": 7, "b": 8}"""
+    result = read_json(StringIO(jsonl), lines=True, nrows=nrows)
+    expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)])
+def test_readjson_nrows_chunks(request, nrows, chunksize, engine):
+    # GH 33916
+    # Test reading line-format JSON to Series with nrows and chunksize param
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    jsonl = """{"a": 1, "b": 2}
+        {"a": 3, "b": 4}
+        {"a": 5, "b": 6}
+        {"a": 7, "b": 8}"""
+
+    if engine != "pyarrow":
+        with read_json(
+            StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+        ) as reader:
+            chunked = pd.concat(reader)
+    else:
+        with read_json(
+            jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+        ) as reader:
+            chunked = pd.concat(reader)
+    expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+    tm.assert_frame_equal(chunked, expected)
+
+
+def test_readjson_nrows_requires_lines(engine):
+    # GH 33916
+    # Test ValueError raised if nrows is set without setting lines in read_json
+    jsonl = """{"a": 1, "b": 2}
+        {"a": 3, "b": 4}
+        {"a": 5, "b": 6}
+        {"a": 7, "b": 8}"""
+    msg = "nrows can only be passed if lines=True"
+    with pytest.raises(ValueError, match=msg):
+        read_json(jsonl, lines=False, nrows=2, engine=engine)
+
+
+def test_readjson_lines_chunks_fileurl(request, datapath, engine):
+    # GH 27135
+    # Test reading line-format JSON from file url
+    if engine == "pyarrow":
+        # GH 48893
+        reason = (
+            "Pyarrow only supports a file path as an input and line delimited json"
+            "and doesn't support chunksize parameter."
+        )
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+    df_list_expected = [
+        DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
+        DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
+        DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
+    ]
+    os_path = datapath("io", "json", "data", "line_delimited.json")
+    file_url = Path(os_path).as_uri()
+    with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader:
+        for index, chuck in enumerate(url_reader):
+            tm.assert_frame_equal(chuck, df_list_expected[index])
+
+
+def test_chunksize_is_incremental():
+    # See https://github.com/pandas-dev/pandas/issues/34548
+    jsonl = (
+        """{"a": 1, "b": 2}
+        {"a": 3, "b": 4}
+        {"a": 5, "b": 6}
+        {"a": 7, "b": 8}\n"""
+        * 1000
+    )
+
+    class MyReader:
+        def __init__(self, contents) -> None:
+            self.read_count = 0
+            self.stringio = StringIO(contents)
+
+        def read(self, *args):
+            self.read_count += 1
+            return self.stringio.read(*args)
+
+        def __iter__(self) -> Iterator:
+            self.read_count += 1
+            return iter(self.stringio)
+
+    reader = MyReader(jsonl)
+    assert len(list(read_json(reader, lines=True, chunksize=100))) > 1
+    assert reader.read_count > 10
+
+
+@pytest.mark.parametrize("orient_", ["split", "index", "table"])
+def test_to_json_append_orient(orient_):
+    # GH 35849
+    # Test ValueError when orient is not 'records'
+    df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    msg = (
+        r"mode='a' \(append\) is only supported when "
+        "lines is True and orient is 'records'"
+    )
+    with pytest.raises(ValueError, match=msg):
+        df.to_json(mode="a", orient=orient_)
+
+
+def test_to_json_append_lines():
+    # GH 35849
+    # Test ValueError when lines is not True
+    df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    msg = (
+        r"mode='a' \(append\) is only supported when "
+        "lines is True and orient is 'records'"
+    )
+    with pytest.raises(ValueError, match=msg):
+        df.to_json(mode="a", lines=False, orient="records")
+
+
+@pytest.mark.parametrize("mode_", ["r", "x"])
+def test_to_json_append_mode(mode_):
+    # GH 35849
+    # Test ValueError when mode is not supported option
+    df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    msg = (
+        f"mode={mode_} is not a valid option."
+        "Only 'w' and 'a' are currently supported."
+    )
+    with pytest.raises(ValueError, match=msg):
+        df.to_json(mode=mode_, lines=False, orient="records")
+
+
+def test_to_json_append_output_consistent_columns():
+    # GH 35849
+    # Testing that resulting output reads in as expected.
+    # Testing same columns, new rows
+    df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+
+    expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]})
+    with tm.ensure_clean("test.json") as path:
+        # Save dataframes to the same file
+        df1.to_json(path, lines=True, orient="records")
+        df2.to_json(path, mode="a", lines=True, orient="records")
+
+        # Read path file
+        result = read_json(path, lines=True)
+        tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_inconsistent_columns():
+    # GH 35849
+    # Testing that resulting output reads in as expected.
+    # Testing one new column, one old column, new rows
+    df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+
+    expected = DataFrame(
+        {
+            "col1": [1, 2, None, None],
+            "col2": ["a", "b", "e", "f"],
+            "col3": [np.nan, np.nan, "!", "#"],
+        }
+    )
+    with tm.ensure_clean("test.json") as path:
+        # Save dataframes to the same file
+        df1.to_json(path, mode="a", lines=True, orient="records")
+        df3.to_json(path, mode="a", lines=True, orient="records")
+
+        # Read path file
+        result = read_json(path, lines=True)
+        tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_different_columns():
+    # GH 35849
+    # Testing that resulting output reads in as expected.
+    # Testing same, differing and new columns
+    df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+    df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+    df4 = DataFrame({"col4": [True, False]})
+
+    expected = DataFrame(
+        {
+            "col1": [1, 2, 3, 4, None, None, None, None],
+            "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan],
+            "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan],
+            "col4": [None, None, None, None, None, None, True, False],
+        }
+    ).astype({"col4": "float"})
+    with tm.ensure_clean("test.json") as path:
+        # Save dataframes to the same file
+        df1.to_json(path, mode="a", lines=True, orient="records")
+        df2.to_json(path, mode="a", lines=True, orient="records")
+        df3.to_json(path, mode="a", lines=True, orient="records")
+        df4.to_json(path, mode="a", lines=True, orient="records")
+
+        # Read path file
+        result = read_json(path, lines=True)
+        tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_different_columns_reordered():
+    # GH 35849
+    # Testing that resulting output reads in as expected.
+    # Testing specific result column order.
+    df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+    df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+    df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+    df4 = DataFrame({"col4": [True, False]})
+
+    # df4, df3, df2, df1 (in that order)
+    expected = DataFrame(
+        {
+            "col4": [True, False, None, None, None, None, None, None],
+            "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"],
+            "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan],
+            "col1": [None, None, None, None, 3, 4, 1, 2],
+        }
+    ).astype({"col4": "float"})
+    with tm.ensure_clean("test.json") as path:
+        # Save dataframes to the same file
+        df4.to_json(path, mode="a", lines=True, orient="records")
+        df3.to_json(path, mode="a", lines=True, orient="records")
+        df2.to_json(path, mode="a", lines=True, orient="records")
+        df1.to_json(path, mode="a", lines=True, orient="records")
+
+        # Read path file
+        result = read_json(path, lines=True)
+        tm.assert_frame_equal(result, expected)
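The file above pins down two read/write patterns worth seeing in isolation. A minimal sketch of both, under the same assumptions as the tests (illustrative only, not part of the vendored file; the file path is hypothetical):

    from io import StringIO

    import pandas as pd

    # Chunked reading: with lines=True and a chunksize, read_json returns a
    # JsonReader context manager that yields DataFrames of at most chunksize rows.
    jsonl = '{"a": 1, "b": 2}\n{"a": 3, "b": 4}\n{"a": 5, "b": 6}\n'
    with pd.read_json(StringIO(jsonl), lines=True, chunksize=2) as reader:
        result = pd.concat(reader)  # chunks of 2 rows, then 1 row

    # Appending: mode="a" is only valid with lines=True and orient="records",
    # so successive frames accumulate as extra JSON lines in one file.
    df = pd.DataFrame({"a": [7], "b": [8]})
    df.to_json("appended.json", lines=True, orient="records")     # hypothetical path
    df.to_json("appended.json", mode="a", lines=True, orient="records")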
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py
ADDED
@@ -0,0 +1,1087 @@
1 |
+
import calendar
|
2 |
+
import datetime
|
3 |
+
import decimal
|
4 |
+
import json
|
5 |
+
import locale
|
6 |
+
import math
|
7 |
+
import re
|
8 |
+
import time
|
9 |
+
|
10 |
+
import dateutil
|
11 |
+
import numpy as np
|
12 |
+
import pytest
|
13 |
+
import pytz
|
14 |
+
|
15 |
+
import pandas._libs.json as ujson
|
16 |
+
from pandas.compat import IS64
|
17 |
+
|
18 |
+
from pandas import (
|
19 |
+
DataFrame,
|
20 |
+
DatetimeIndex,
|
21 |
+
Index,
|
22 |
+
NaT,
|
23 |
+
PeriodIndex,
|
24 |
+
Series,
|
25 |
+
Timedelta,
|
26 |
+
Timestamp,
|
27 |
+
date_range,
|
28 |
+
)
|
29 |
+
import pandas._testing as tm
|
30 |
+
|
31 |
+
|
32 |
+
def _clean_dict(d):
|
33 |
+
"""
|
34 |
+
Sanitize dictionary for JSON by converting all keys to strings.
|
35 |
+
|
36 |
+
Parameters
|
37 |
+
----------
|
38 |
+
d : dict
|
39 |
+
The dictionary to convert.
|
40 |
+
|
41 |
+
Returns
|
42 |
+
-------
|
43 |
+
cleaned_dict : dict
|
44 |
+
"""
|
45 |
+
return {str(k): v for k, v in d.items()}
|
46 |
+
|
47 |
+
|
48 |
+
@pytest.fixture(
|
49 |
+
params=[None, "split", "records", "values", "index"] # Column indexed by default.
|
50 |
+
)
|
51 |
+
def orient(request):
|
52 |
+
return request.param
|
53 |
+
|
54 |
+
|
55 |
+
class TestUltraJSONTests:
|
56 |
+
@pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
|
57 |
+
def test_encode_decimal(self):
|
58 |
+
sut = decimal.Decimal("1337.1337")
|
59 |
+
encoded = ujson.ujson_dumps(sut, double_precision=15)
|
60 |
+
decoded = ujson.ujson_loads(encoded)
|
61 |
+
assert decoded == 1337.1337
|
62 |
+
|
63 |
+
sut = decimal.Decimal("0.95")
|
64 |
+
encoded = ujson.ujson_dumps(sut, double_precision=1)
|
65 |
+
assert encoded == "1.0"
|
66 |
+
|
67 |
+
decoded = ujson.ujson_loads(encoded)
|
68 |
+
assert decoded == 1.0
|
69 |
+
|
70 |
+
sut = decimal.Decimal("0.94")
|
71 |
+
encoded = ujson.ujson_dumps(sut, double_precision=1)
|
72 |
+
assert encoded == "0.9"
|
73 |
+
|
74 |
+
decoded = ujson.ujson_loads(encoded)
|
75 |
+
assert decoded == 0.9
|
76 |
+
|
77 |
+
sut = decimal.Decimal("1.95")
|
78 |
+
encoded = ujson.ujson_dumps(sut, double_precision=1)
|
79 |
+
assert encoded == "2.0"
|
80 |
+
|
81 |
+
decoded = ujson.ujson_loads(encoded)
|
82 |
+
assert decoded == 2.0
|
83 |
+
|
84 |
+
sut = decimal.Decimal("-1.95")
|
85 |
+
encoded = ujson.ujson_dumps(sut, double_precision=1)
|
86 |
+
assert encoded == "-2.0"
|
87 |
+
|
88 |
+
decoded = ujson.ujson_loads(encoded)
|
89 |
+
assert decoded == -2.0
|
90 |
+
|
91 |
+
sut = decimal.Decimal("0.995")
|
92 |
+
encoded = ujson.ujson_dumps(sut, double_precision=2)
|
93 |
+
assert encoded == "1.0"
|
94 |
+
|
95 |
+
decoded = ujson.ujson_loads(encoded)
|
96 |
+
assert decoded == 1.0
|
97 |
+
|
98 |
+
sut = decimal.Decimal("0.9995")
|
99 |
+
encoded = ujson.ujson_dumps(sut, double_precision=3)
|
100 |
+
assert encoded == "1.0"
|
101 |
+
|
102 |
+
decoded = ujson.ujson_loads(encoded)
|
103 |
+
assert decoded == 1.0
|
104 |
+
|
105 |
+
sut = decimal.Decimal("0.99999999999999944")
|
106 |
+
encoded = ujson.ujson_dumps(sut, double_precision=15)
|
107 |
+
assert encoded == "1.0"
|
108 |
+
|
109 |
+
decoded = ujson.ujson_loads(encoded)
|
110 |
+
assert decoded == 1.0
|
111 |
+
|
112 |
+
@pytest.mark.parametrize("ensure_ascii", [True, False])
|
113 |
+
def test_encode_string_conversion(self, ensure_ascii):
|
114 |
+
string_input = "A string \\ / \b \f \n \r \t </script> &"
|
115 |
+
not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
|
116 |
+
html_encoded = (
|
117 |
+
'"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
|
118 |
+
)
|
119 |
+
|
120 |
+
def helper(expected_output, **encode_kwargs):
|
121 |
+
output = ujson.ujson_dumps(
|
122 |
+
string_input, ensure_ascii=ensure_ascii, **encode_kwargs
|
123 |
+
)
|
124 |
+
|
125 |
+
assert output == expected_output
|
126 |
+
assert string_input == json.loads(output)
|
127 |
+
assert string_input == ujson.ujson_loads(output)
|
128 |
+
|
129 |
+
# Default behavior assumes encode_html_chars=False.
|
130 |
+
helper(not_html_encoded)
|
131 |
+
|
132 |
+
# Make sure explicit encode_html_chars=False works.
|
133 |
+
helper(not_html_encoded, encode_html_chars=False)
|
134 |
+
|
135 |
+
# Make sure explicit encode_html_chars=True does the encoding.
|
136 |
+
helper(html_encoded, encode_html_chars=True)
|
137 |
+
|
138 |
+
@pytest.mark.parametrize(
|
139 |
+
"long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388]
|
140 |
+
)
|
141 |
+
def test_double_long_numbers(self, long_number):
|
142 |
+
sut = {"a": long_number}
|
143 |
+
encoded = ujson.ujson_dumps(sut, double_precision=15)
|
144 |
+
|
145 |
+
decoded = ujson.ujson_loads(encoded)
|
146 |
+
assert sut == decoded
|
147 |
+
|
148 |
+
def test_encode_non_c_locale(self):
|
149 |
+
lc_category = locale.LC_NUMERIC
|
150 |
+
|
151 |
+
# We just need one of these locales to work.
|
152 |
+
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
|
153 |
+
if tm.can_set_locale(new_locale, lc_category):
|
154 |
+
with tm.set_locale(new_locale, lc_category):
|
155 |
+
assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60
|
156 |
+
assert ujson.ujson_loads("4.78", precise_float=True) == 4.78
|
157 |
+
break
|
158 |
+
|
159 |
+
def test_decimal_decode_test_precise(self):
|
160 |
+
sut = {"a": 4.56}
|
161 |
+
encoded = ujson.ujson_dumps(sut)
|
162 |
+
decoded = ujson.ujson_loads(encoded, precise_float=True)
|
163 |
+
assert sut == decoded
|
164 |
+
|
165 |
+
def test_encode_double_tiny_exponential(self):
|
166 |
+
num = 1e-40
|
167 |
+
assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
|
168 |
+
num = 1e-100
|
169 |
+
assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
|
170 |
+
num = -1e-45
|
171 |
+
assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
|
172 |
+
num = -1e-145
|
173 |
+
assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num)))
|
174 |
+
|
175 |
+
@pytest.mark.parametrize("unicode_key", ["key1", "بن"])
|
176 |
+
def test_encode_dict_with_unicode_keys(self, unicode_key):
|
177 |
+
unicode_dict = {unicode_key: "value1"}
|
178 |
+
assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict))
|
179 |
+
|
180 |
+
@pytest.mark.parametrize(
|
181 |
+
"double_input", [math.pi, -math.pi] # Should work with negatives too.
|
182 |
+
)
|
183 |
+
def test_encode_double_conversion(self, double_input):
|
184 |
+
output = ujson.ujson_dumps(double_input)
|
185 |
+
assert round(double_input, 5) == round(json.loads(output), 5)
|
186 |
+
assert round(double_input, 5) == round(ujson.ujson_loads(output), 5)
|
187 |
+
|
188 |
+
def test_encode_with_decimal(self):
|
189 |
+
decimal_input = 1.0
|
190 |
+
output = ujson.ujson_dumps(decimal_input)
|
191 |
+
|
192 |
+
assert output == "1.0"
|
193 |
+
|
194 |
+
def test_encode_array_of_nested_arrays(self):
|
195 |
+
nested_input = [[[[]]]] * 20
|
196 |
+
output = ujson.ujson_dumps(nested_input)
|
197 |
+
|
198 |
+
assert nested_input == json.loads(output)
|
199 |
+
assert nested_input == ujson.ujson_loads(output)
|
200 |
+
|
201 |
+
def test_encode_array_of_doubles(self):
|
202 |
+
doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
|
203 |
+
output = ujson.ujson_dumps(doubles_input)
|
204 |
+
|
205 |
+
assert doubles_input == json.loads(output)
|
206 |
+
assert doubles_input == ujson.ujson_loads(output)
|
207 |
+
|
208 |
+
def test_double_precision(self):
|
209 |
+
double_input = 30.012345678901234
|
210 |
+
output = ujson.ujson_dumps(double_input, double_precision=15)
|
211 |
+
|
212 |
+
assert double_input == json.loads(output)
|
213 |
+
assert double_input == ujson.ujson_loads(output)
|
214 |
+
|
215 |
+
for double_precision in (3, 9):
|
216 |
+
output = ujson.ujson_dumps(double_input, double_precision=double_precision)
|
217 |
+
rounded_input = round(double_input, double_precision)
|
218 |
+
|
219 |
+
assert rounded_input == json.loads(output)
|
220 |
+
assert rounded_input == ujson.ujson_loads(output)
|
221 |
+
|
222 |
+
@pytest.mark.parametrize(
|
223 |
+
"invalid_val",
|
224 |
+
[
|
225 |
+
20,
|
226 |
+
-1,
|
227 |
+
"9",
|
228 |
+
None,
|
229 |
+
],
|
230 |
+
)
|
231 |
+
def test_invalid_double_precision(self, invalid_val):
|
232 |
+
double_input = 30.12345678901234567890
|
233 |
+
expected_exception = ValueError if isinstance(invalid_val, int) else TypeError
|
234 |
+
msg = (
|
235 |
+
r"Invalid value '.*' for option 'double_precision', max is '15'|"
|
236 |
+
r"an integer is required \(got type |"
|
237 |
+
r"object cannot be interpreted as an integer"
|
238 |
+
)
|
239 |
+
with pytest.raises(expected_exception, match=msg):
|
240 |
+
ujson.ujson_dumps(double_input, double_precision=invalid_val)
|
241 |
+
|
242 |
+
def test_encode_string_conversion2(self):
|
243 |
+
string_input = "A string \\ / \b \f \n \r \t"
|
244 |
+
output = ujson.ujson_dumps(string_input)
|
245 |
+
|
246 |
+
assert string_input == json.loads(output)
|
247 |
+
assert string_input == ujson.ujson_loads(output)
|
248 |
+
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
|
249 |
+
|
250 |
+
@pytest.mark.parametrize(
|
251 |
+
"unicode_input",
|
252 |
+
["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"],
|
253 |
+
)
|
254 |
+
def test_encode_unicode_conversion(self, unicode_input):
|
255 |
+
enc = ujson.ujson_dumps(unicode_input)
|
256 |
+
dec = ujson.ujson_loads(enc)
|
257 |
+
|
258 |
+
assert enc == json.dumps(unicode_input)
|
259 |
+
assert dec == json.loads(enc)
|
260 |
+
|
261 |
+
def test_encode_control_escaping(self):
|
262 |
+
escaped_input = "\x19"
|
263 |
+
enc = ujson.ujson_dumps(escaped_input)
|
264 |
+
dec = ujson.ujson_loads(enc)
|
265 |
+
|
266 |
+
assert escaped_input == dec
|
267 |
+
assert enc == json.dumps(escaped_input)
|
268 |
+
|
269 |
+
def test_encode_unicode_surrogate_pair(self):
|
270 |
+
surrogate_input = "\xf0\x90\x8d\x86"
|
271 |
+
enc = ujson.ujson_dumps(surrogate_input)
|
272 |
+
dec = ujson.ujson_loads(enc)
|
273 |
+
|
274 |
+
assert enc == json.dumps(surrogate_input)
|
275 |
+
assert dec == json.loads(enc)
|
276 |
+
|
277 |
+
def test_encode_unicode_4bytes_utf8(self):
|
278 |
+
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
|
279 |
+
enc = ujson.ujson_dumps(four_bytes_input)
|
280 |
+
dec = ujson.ujson_loads(enc)
|
281 |
+
|
282 |
+
assert enc == json.dumps(four_bytes_input)
|
283 |
+
assert dec == json.loads(enc)
|
284 |
+
|
285 |
+
def test_encode_unicode_4bytes_utf8highest(self):
|
286 |
+
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
|
287 |
+
enc = ujson.ujson_dumps(four_bytes_input)
|
288 |
+
|
289 |
+
dec = ujson.ujson_loads(enc)
|
290 |
+
|
291 |
+
assert enc == json.dumps(four_bytes_input)
|
292 |
+
assert dec == json.loads(enc)
|
293 |
+
|
294 |
+
def test_encode_unicode_error(self):
|
295 |
+
string = "'\udac0'"
|
296 |
+
msg = (
|
297 |
+
r"'utf-8' codec can't encode character '\\udac0' "
|
298 |
+
r"in position 1: surrogates not allowed"
|
299 |
+
)
|
300 |
+
with pytest.raises(UnicodeEncodeError, match=msg):
|
301 |
+
ujson.ujson_dumps([string])
|
302 |
+
|
303 |
+
def test_encode_array_in_array(self):
|
304 |
+
arr_in_arr_input = [[[[]]]]
|
305 |
+
output = ujson.ujson_dumps(arr_in_arr_input)
|
306 |
+
|
307 |
+
assert arr_in_arr_input == json.loads(output)
|
308 |
+
assert output == json.dumps(arr_in_arr_input)
|
309 |
+
assert arr_in_arr_input == ujson.ujson_loads(output)
|
310 |
+
|
311 |
+
@pytest.mark.parametrize(
|
312 |
+
"num_input",
|
313 |
+
[
|
314 |
+
31337,
|
315 |
+
-31337, # Negative number.
|
316 |
+
-9223372036854775808, # Large negative number.
|
317 |
+
],
|
318 |
+
)
|
319 |
+
def test_encode_num_conversion(self, num_input):
|
320 |
+
output = ujson.ujson_dumps(num_input)
|
321 |
+
assert num_input == json.loads(output)
|
322 |
+
assert output == json.dumps(num_input)
|
323 |
+
assert num_input == ujson.ujson_loads(output)
|
324 |
+
|
325 |
+
def test_encode_list_conversion(self):
|
326 |
+
list_input = [1, 2, 3, 4]
|
327 |
+
output = ujson.ujson_dumps(list_input)
|
328 |
+
|
329 |
+
assert list_input == json.loads(output)
|
330 |
+
assert list_input == ujson.ujson_loads(output)
|
331 |
+
|
332 |
+
def test_encode_dict_conversion(self):
|
333 |
+
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
|
334 |
+
output = ujson.ujson_dumps(dict_input)
|
335 |
+
|
336 |
+
assert dict_input == json.loads(output)
|
337 |
+
assert dict_input == ujson.ujson_loads(output)
|
338 |
+
|
339 |
+
@pytest.mark.parametrize("builtin_value", [None, True, False])
|
340 |
+
def test_encode_builtin_values_conversion(self, builtin_value):
|
341 |
+
output = ujson.ujson_dumps(builtin_value)
|
342 |
+
assert builtin_value == json.loads(output)
|
343 |
+
assert output == json.dumps(builtin_value)
|
344 |
+
assert builtin_value == ujson.ujson_loads(output)
|
345 |
+
|
346 |
+
def test_encode_datetime_conversion(self):
|
347 |
+
datetime_input = datetime.datetime.fromtimestamp(time.time())
|
348 |
+
output = ujson.ujson_dumps(datetime_input, date_unit="s")
|
349 |
+
expected = calendar.timegm(datetime_input.utctimetuple())
|
350 |
+
|
351 |
+
assert int(expected) == json.loads(output)
|
352 |
+
assert int(expected) == ujson.ujson_loads(output)
|
353 |
+
|
354 |
+
def test_encode_date_conversion(self):
|
355 |
+
date_input = datetime.date.fromtimestamp(time.time())
|
356 |
+
output = ujson.ujson_dumps(date_input, date_unit="s")
|
357 |
+
|
358 |
+
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
|
359 |
+
expected = calendar.timegm(tup)
|
360 |
+
|
361 |
+
assert int(expected) == json.loads(output)
|
362 |
+
assert int(expected) == ujson.ujson_loads(output)
|
363 |
+
|
364 |
+
@pytest.mark.parametrize(
|
365 |
+
"test",
|
366 |
+
[datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)],
|
367 |
+
)
|
368 |
+
def test_encode_time_conversion_basic(self, test):
|
369 |
+
output = ujson.ujson_dumps(test)
|
370 |
+
expected = f'"{test.isoformat()}"'
|
371 |
+
assert expected == output
|
372 |
+
|
373 |
+
def test_encode_time_conversion_pytz(self):
|
374 |
+
# see gh-11473: to_json segfaults with timezone-aware datetimes
|
375 |
+
test = datetime.time(10, 12, 15, 343243, pytz.utc)
|
376 |
+
output = ujson.ujson_dumps(test)
|
377 |
+
expected = f'"{test.isoformat()}"'
|
378 |
+
assert expected == output
|
379 |
+
|
380 |
+
def test_encode_time_conversion_dateutil(self):
|
381 |
+
# see gh-11473: to_json segfaults with timezone-aware datetimes
|
382 |
+
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
|
383 |
+
output = ujson.ujson_dumps(test)
|
384 |
+
expected = f'"{test.isoformat()}"'
|
385 |
+
assert expected == output
|
386 |
+
|
387 |
+
@pytest.mark.parametrize(
|
388 |
+
"decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf]
|
389 |
+
)
|
390 |
+
def test_encode_as_null(self, decoded_input):
|
391 |
+
assert ujson.ujson_dumps(decoded_input) == "null", "Expected null"
|
392 |
+
|
393 |
+
def test_datetime_units(self):
|
394 |
+
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
|
395 |
+
stamp = Timestamp(val).as_unit("ns")
|
396 |
+
|
397 |
+
roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s"))
|
398 |
+
assert roundtrip == stamp._value // 10**9
|
399 |
+
|
400 |
+
roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms"))
|
401 |
+
assert roundtrip == stamp._value // 10**6
|
402 |
+
|
403 |
+
roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us"))
|
404 |
+
assert roundtrip == stamp._value // 10**3
|
405 |
+
|
406 |
+
roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns"))
|
407 |
+
assert roundtrip == stamp._value
|
408 |
+
|
409 |
+
msg = "Invalid value 'foo' for option 'date_unit'"
|
410 |
+
with pytest.raises(ValueError, match=msg):
|
411 |
+
ujson.ujson_dumps(val, date_unit="foo")
|
412 |
+
|
413 |
+
def test_encode_to_utf8(self):
|
414 |
+
unencoded = "\xe6\x97\xa5\xd1\x88"
|
415 |
+
|
416 |
+
enc = ujson.ujson_dumps(unencoded, ensure_ascii=False)
|
417 |
+
dec = ujson.ujson_loads(enc)
|
418 |
+
|
419 |
+
assert enc == json.dumps(unencoded, ensure_ascii=False)
|
420 |
+
assert dec == json.loads(enc)
|
421 |
+
|
422 |
+
def test_decode_from_unicode(self):
|
423 |
+
unicode_input = '{"obj": 31337}'
|
424 |
+
|
425 |
+
dec1 = ujson.ujson_loads(unicode_input)
|
426 |
+
dec2 = ujson.ujson_loads(str(unicode_input))
|
427 |
+
|
428 |
+
assert dec1 == dec2
|
429 |
+
|
430 |
+
def test_encode_recursion_max(self):
|
431 |
+
# 8 is the max recursion depth
|
432 |
+
|
433 |
+
class O2:
|
434 |
+
member = 0
|
435 |
+
|
436 |
+
class O1:
|
437 |
+
member = 0
|
438 |
+
|
439 |
+
decoded_input = O1()
|
440 |
+
decoded_input.member = O2()
|
441 |
+
decoded_input.member.member = decoded_input
|
442 |
+
|
443 |
+
with pytest.raises(OverflowError, match="Maximum recursion level reached"):
|
444 |
+
ujson.ujson_dumps(decoded_input)
|
445 |
+
|
446 |
+
def test_decode_jibberish(self):
|
447 |
+
jibberish = "fdsa sda v9sa fdsa"
|
448 |
+
msg = "Unexpected character found when decoding 'false'"
|
449 |
+
with pytest.raises(ValueError, match=msg):
|
450 |
+
ujson.ujson_loads(jibberish)
|
451 |
+
|
452 |
+
@pytest.mark.parametrize(
|
453 |
+
"broken_json",
|
454 |
+
[
|
455 |
+
"[", # Broken array start.
|
456 |
+
"{", # Broken object start.
|
457 |
+
"]", # Broken array end.
|
458 |
+
"}", # Broken object end.
|
459 |
+
],
|
460 |
+
)
|
461 |
+
def test_decode_broken_json(self, broken_json):
|
462 |
+
msg = "Expected object or value"
|
463 |
+
with pytest.raises(ValueError, match=msg):
|
464 |
+
ujson.ujson_loads(broken_json)
|
465 |
+
|
466 |
+
@pytest.mark.parametrize("too_big_char", ["[", "{"])
|
467 |
+
def test_decode_depth_too_big(self, too_big_char):
|
468 |
+
with pytest.raises(ValueError, match="Reached object decoding depth limit"):
|
469 |
+
ujson.ujson_loads(too_big_char * (1024 * 1024))
|
470 |
+
|
471 |
+
@pytest.mark.parametrize(
|
472 |
+
"bad_string",
|
473 |
+
[
|
474 |
+
'"TESTING', # Unterminated.
|
475 |
+
'"TESTING\\"', # Unterminated escape.
|
476 |
+
"tru", # Broken True.
|
477 |
+
"fa", # Broken False.
|
478 |
+
"n", # Broken None.
|
479 |
+
],
|
480 |
+
)
|
481 |
+
def test_decode_bad_string(self, bad_string):
|
482 |
+
msg = (
|
483 |
+
"Unexpected character found when decoding|"
|
484 |
+
"Unmatched ''\"' when when decoding 'string'"
|
485 |
+
)
|
486 |
+
with pytest.raises(ValueError, match=msg):
|
487 |
+
ujson.ujson_loads(bad_string)
|
488 |
+
|
489 |
+
@pytest.mark.parametrize(
|
490 |
+
"broken_json, err_msg",
|
491 |
+
[
|
492 |
+
(
|
493 |
+
'{{1337:""}}',
|
494 |
+
"Key name of object must be 'string' when decoding 'object'",
|
495 |
+
),
|
496 |
+
('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"),
|
497 |
+
("[[[true", "Unexpected character found when decoding array value (2)"),
|
498 |
+
],
|
499 |
+
)
|
500 |
+
def test_decode_broken_json_leak(self, broken_json, err_msg):
|
501 |
+
for _ in range(1000):
|
502 |
+
with pytest.raises(ValueError, match=re.escape(err_msg)):
|
503 |
+
ujson.ujson_loads(broken_json)
|
504 |
+
|
505 |
+
@pytest.mark.parametrize(
|
506 |
+
"invalid_dict",
|
507 |
+
[
|
508 |
+
"{{{{31337}}}}", # No key.
|
509 |
+
'{{{{"key":}}}}', # No value.
|
510 |
+
'{{{{"key"}}}}', # No colon or value.
|
511 |
+
],
|
512 |
+
)
|
513 |
+
def test_decode_invalid_dict(self, invalid_dict):
|
514 |
+
msg = (
|
515 |
+
"Key name of object must be 'string' when decoding 'object'|"
|
516 |
+
"No ':' found when decoding object value|"
|
517 |
+
"Expected object or value"
|
518 |
+
)
|
519 |
+
with pytest.raises(ValueError, match=msg):
|
520 |
+
ujson.ujson_loads(invalid_dict)
|
521 |
+
|
522 |
+
@pytest.mark.parametrize(
|
523 |
+
"numeric_int_as_str", ["31337", "-31337"] # Should work with negatives.
|
524 |
+
)
|
525 |
+
def test_decode_numeric_int(self, numeric_int_as_str):
|
526 |
+
assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str)
|
527 |
+
|
528 |
+
def test_encode_null_character(self):
|
529 |
+
wrapped_input = "31337 \x00 1337"
|
530 |
+
output = ujson.ujson_dumps(wrapped_input)
|
531 |
+
|
532 |
+
assert wrapped_input == json.loads(output)
|
533 |
+
assert output == json.dumps(wrapped_input)
|
534 |
+
assert wrapped_input == ujson.ujson_loads(output)
|
535 |
+
|
536 |
+
alone_input = "\x00"
|
537 |
+
output = ujson.ujson_dumps(alone_input)
|
538 |
+
|
539 |
+
assert alone_input == json.loads(output)
|
540 |
+
assert output == json.dumps(alone_input)
|
541 |
+
assert alone_input == ujson.ujson_loads(output)
|
542 |
+
assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ")
|
543 |
+
|
544 |
+
def test_decode_null_character(self):
|
545 |
+
wrapped_input = '"31337 \\u0000 31337"'
|
546 |
+
assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input)
|
547 |
+
|
548 |
+
def test_encode_list_long_conversion(self):
|
549 |
+
long_input = [
|
550 |
+
9223372036854775807,
|
551 |
+
9223372036854775807,
|
552 |
+
9223372036854775807,
|
553 |
+
9223372036854775807,
|
554 |
+
9223372036854775807,
|
555 |
+
9223372036854775807,
|
556 |
+
]
|
557 |
+
output = ujson.ujson_dumps(long_input)
|
558 |
+
|
559 |
+
assert long_input == json.loads(output)
|
560 |
+
assert long_input == ujson.ujson_loads(output)
|
561 |
+
|
562 |
+
@pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615])
|
563 |
+
def test_encode_long_conversion(self, long_input):
|
564 |
+
output = ujson.ujson_dumps(long_input)
|
565 |
+
|
566 |
+
assert long_input == json.loads(output)
|
567 |
+
assert output == json.dumps(long_input)
|
568 |
+
assert long_input == ujson.ujson_loads(output)
|
569 |
+
|
570 |
+
@pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1])
|
571 |
+
def test_dumps_ints_larger_than_maxsize(self, bigNum):
|
572 |
+
encoding = ujson.ujson_dumps(bigNum)
|
573 |
+
assert str(bigNum) == encoding
|
574 |
+
|
575 |
+
with pytest.raises(
|
576 |
+
ValueError,
|
577 |
+
match="Value is too big|Value is too small",
|
578 |
+
):
|
579 |
+
assert ujson.ujson_loads(encoding) == bigNum
|
580 |
+
|
581 |
+
@pytest.mark.parametrize(
|
582 |
+
"int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"]
|
583 |
+
)
|
584 |
+
def test_decode_numeric_int_exp(self, int_exp):
|
585 |
+
assert ujson.ujson_loads(int_exp) == json.loads(int_exp)
|
586 |
+
|
587 |
+
def test_loads_non_str_bytes_raises(self):
|
588 |
+
msg = "a bytes-like object is required, not 'NoneType'"
|
589 |
+
with pytest.raises(TypeError, match=msg):
|
590 |
+
ujson.ujson_loads(None)
|
591 |
+
|
592 |
+
@pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1])
|
593 |
+
def test_decode_number_with_32bit_sign_bit(self, val):
|
594 |
+
# Test that numbers that fit within 32 bits but would have the
|
595 |
+
# sign bit set (2**31 <= x < 2**32) are decoded properly.
|
596 |
+
doc = f'{{"id": {val}}}'
|
597 |
+
assert ujson.ujson_loads(doc)["id"] == val
|
598 |
+
|
599 |
+
def test_encode_big_escape(self):
|
600 |
+
# Make sure no Exception is raised.
|
601 |
+
for _ in range(10):
|
602 |
+
base = "\u00e5".encode()
|
603 |
+
escape_input = base * 1024 * 1024 * 2
|
604 |
+
ujson.ujson_dumps(escape_input)
|
605 |
+
|
606 |
+
def test_decode_big_escape(self):
|
607 |
+
# Make sure no Exception is raised.
|
608 |
+
for _ in range(10):
|
609 |
+
base = "\u00e5".encode()
|
610 |
+
quote = b'"'
|
611 |
+
|
612 |
+
escape_input = quote + (base * 1024 * 1024 * 2) + quote
|
613 |
+
ujson.ujson_loads(escape_input)
|
614 |
+
|
615 |
+
def test_to_dict(self):
|
616 |
+
d = {"key": 31337}
|
617 |
+
|
618 |
+
class DictTest:
|
619 |
+
def toDict(self):
|
620 |
+
return d
|
621 |
+
|
622 |
+
o = DictTest()
|
623 |
+
output = ujson.ujson_dumps(o)
|
624 |
+
|
625 |
+
dec = ujson.ujson_loads(output)
|
626 |
+
assert dec == d
|
627 |
+
|
628 |
+
def test_default_handler(self):
|
629 |
+
class _TestObject:
|
630 |
+
def __init__(self, val) -> None:
|
631 |
+
self.val = val
|
632 |
+
|
633 |
+
@property
|
634 |
+
def recursive_attr(self):
|
635 |
+
return _TestObject("recursive_attr")
|
636 |
+
|
637 |
+
def __str__(self) -> str:
|
638 |
+
return str(self.val)
|
639 |
+
|
640 |
+
msg = "Maximum recursion level reached"
|
641 |
+
with pytest.raises(OverflowError, match=msg):
|
642 |
+
ujson.ujson_dumps(_TestObject("foo"))
|
643 |
+
assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str)
|
644 |
+
|
645 |
+
def my_handler(_):
|
646 |
+
return "foobar"
|
647 |
+
|
648 |
+
assert '"foobar"' == ujson.ujson_dumps(
|
649 |
+
_TestObject("foo"), default_handler=my_handler
|
650 |
+
)
|
651 |
+
|
652 |
+
def my_handler_raises(_):
|
653 |
+
raise TypeError("I raise for anything")
|
654 |
+
|
655 |
+
with pytest.raises(TypeError, match="I raise for anything"):
|
656 |
+
ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises)
|
657 |
+
|
658 |
+
def my_int_handler(_):
|
659 |
+
return 42
|
660 |
+
|
661 |
+
assert (
|
662 |
+
ujson.ujson_loads(
|
663 |
+
ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler)
|
664 |
+
)
|
665 |
+
== 42
|
666 |
+
)
|
667 |
+
|
668 |
+
def my_obj_handler(_):
|
669 |
+
return datetime.datetime(2013, 2, 3)
|
670 |
+
|
671 |
+
assert ujson.ujson_loads(
|
672 |
+
ujson.ujson_dumps(datetime.datetime(2013, 2, 3))
|
673 |
+
) == ujson.ujson_loads(
|
674 |
+
ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler)
|
675 |
+
)
|
676 |
+
|
677 |
+
obj_list = [_TestObject("foo"), _TestObject("bar")]
|
678 |
+
assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads(
|
679 |
+
ujson.ujson_dumps(obj_list, default_handler=str)
|
680 |
+
)
|
681 |
+
|
682 |
+
def test_encode_object(self):
|
683 |
+
class _TestObject:
|
684 |
+
def __init__(self, a, b, _c, d) -> None:
|
685 |
+
self.a = a
|
686 |
+
self.b = b
|
687 |
+
self._c = _c
|
688 |
+
self.d = d
|
689 |
+
|
690 |
+
def e(self):
|
691 |
+
return 5
|
692 |
+
|
693 |
+
# JSON keys should be all non-callable non-underscore attributes, see GH-42768
|
694 |
+
test_object = _TestObject(a=1, b=2, _c=3, d=4)
|
695 |
+
assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == {
|
696 |
+
"a": 1,
|
697 |
+
"b": 2,
|
698 |
+
"d": 4,
|
699 |
+
}
|
700 |
+
|
701 |
+
def test_ujson__name__(self):
|
702 |
+
# GH 52898
|
703 |
+
assert ujson.__name__ == "pandas._libs.json"
|
704 |
+
|
705 |
+
|
706 |
+
class TestNumpyJSONTests:
|
707 |
+
@pytest.mark.parametrize("bool_input", [True, False])
|
708 |
+
def test_bool(self, bool_input):
|
709 |
+
b = bool(bool_input)
|
710 |
+
assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b
|
711 |
+
|
712 |
+
def test_bool_array(self):
|
713 |
+
bool_array = np.array(
|
714 |
+
[True, False, True, True, False, True, False, False], dtype=bool
|
715 |
+
)
|
716 |
+
output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool)
|
717 |
+
tm.assert_numpy_array_equal(bool_array, output)
|
718 |
+
|
719 |
+
def test_int(self, any_int_numpy_dtype):
|
720 |
+
klass = np.dtype(any_int_numpy_dtype).type
|
721 |
+
num = klass(1)
|
722 |
+
|
723 |
+
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
|
724 |
+
|
725 |
+
def test_int_array(self, any_int_numpy_dtype):
|
726 |
+
arr = np.arange(100, dtype=int)
|
727 |
+
arr_input = arr.astype(any_int_numpy_dtype)
|
728 |
+
|
729 |
+
arr_output = np.array(
|
730 |
+
ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype
|
731 |
+
)
|
732 |
+
tm.assert_numpy_array_equal(arr_input, arr_output)
|
733 |
+
|
734 |
+
def test_int_max(self, any_int_numpy_dtype):
|
735 |
+
if any_int_numpy_dtype in ("int64", "uint64") and not IS64:
|
736 |
+
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
|
737 |
+
|
738 |
+
klass = np.dtype(any_int_numpy_dtype).type
|
739 |
+
|
740 |
+
# uint64 max will always overflow,
|
741 |
+
# as it's encoded to signed.
|
742 |
+
if any_int_numpy_dtype == "uint64":
|
743 |
+
num = np.iinfo("int64").max
|
744 |
+
else:
|
745 |
+
num = np.iinfo(any_int_numpy_dtype).max
|
746 |
+
|
747 |
+
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
|
748 |
+
|
749 |
+
def test_float(self, float_numpy_dtype):
|
750 |
+
klass = np.dtype(float_numpy_dtype).type
|
751 |
+
num = klass(256.2013)
|
752 |
+
|
753 |
+
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
|
754 |
+
|
755 |
+
def test_float_array(self, float_numpy_dtype):
|
756 |
+
arr = np.arange(12.5, 185.72, 1.7322, dtype=float)
|
757 |
+
float_input = arr.astype(float_numpy_dtype)
|
758 |
+
|
759 |
+
float_output = np.array(
|
760 |
+
ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)),
|
761 |
+
dtype=float_numpy_dtype,
|
762 |
+
)
|
763 |
+
tm.assert_almost_equal(float_input, float_output)
|
764 |
+
|
765 |
+
def test_float_max(self, float_numpy_dtype):
|
766 |
+
klass = np.dtype(float_numpy_dtype).type
|
767 |
+
num = klass(np.finfo(float_numpy_dtype).max / 10)
|
768 |
+
|
769 |
+
tm.assert_almost_equal(
|
770 |
+
klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num
|
771 |
+
)
|
772 |
+
|
773 |
+
def test_array_basic(self):
|
774 |
+
arr = np.arange(96)
|
775 |
+
arr = arr.reshape((2, 2, 2, 2, 3, 2))
|
776 |
+
|
777 |
+
tm.assert_numpy_array_equal(
|
778 |
+
np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
|
779 |
+
)
|
780 |
+
|
781 |
+
@pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)])
|
782 |
+
def test_array_reshaped(self, shape):
|
783 |
+
arr = np.arange(100)
|
784 |
+
arr = arr.reshape(shape)
|
785 |
+
|
786 |
+
tm.assert_numpy_array_equal(
|
787 |
+
np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
|
788 |
+
)
|
789 |
+
|
790 |
+
def test_array_list(self):
|
791 |
+
arr_list = [
|
792 |
+
"a",
|
793 |
+
[],
|
794 |
+
{},
|
795 |
+
{},
|
796 |
+
[],
|
797 |
+
42,
|
798 |
+
97.8,
|
799 |
+
["a", "b"],
|
800 |
+
{"key": "val"},
|
801 |
+
]
|
802 |
+
arr = np.array(arr_list, dtype=object)
|
803 |
+
result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object)
|
804 |
+
tm.assert_numpy_array_equal(result, arr)
|
805 |
+
|
806 |
+
def test_array_float(self):
|
807 |
+
dtype = np.float32
|
808 |
+
|
809 |
+
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
|
810 |
+
arr = arr.reshape((5, 5, 4))
|
811 |
+
|
812 |
+
arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype)
|
813 |
+
tm.assert_almost_equal(arr, arr_out)
|
814 |
+
|
815 |
+
def test_0d_array(self):
|
816 |
+
# gh-18878
|
817 |
+
msg = re.escape(
|
818 |
+
"array(1) (numpy-scalar) is not JSON serializable at the moment"
|
819 |
+
)
|
820 |
+
with pytest.raises(TypeError, match=msg):
|
821 |
+
ujson.ujson_dumps(np.array(1))
|
822 |
+
|
823 |
+
def test_array_long_double(self):
|
824 |
+
msg = re.compile(
|
825 |
+
"1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment"
|
826 |
+
)
|
827 |
+
with pytest.raises(TypeError, match=msg):
|
828 |
+
ujson.ujson_dumps(np.longdouble(1234.5))
|
829 |
+
|
830 |
+
|
831 |
+
class TestPandasJSONTests:
    def test_dataframe(self, orient):
        dtype = np.int64

        df = DataFrame(
            [[1, 2, 3], [4, 5, 6]],
            index=["a", "b"],
            columns=["x", "y", "z"],
            dtype=dtype,
        )
        encode_kwargs = {} if orient is None else {"orient": orient}
        assert (df.dtypes == dtype).all()

        output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs))
        assert (df.dtypes == dtype).all()

        # Ensure proper DataFrame initialization.
        if orient == "split":
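            # _clean_dict is a helper defined earlier in this file; roughly
            # {str(k): v for k, v in d.items()}, it normalizes the decoded
            # mapping's keys so the dict can be splatted as keyword arguments.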
            dec = _clean_dict(output)
            output = DataFrame(**dec)
        else:
            output = DataFrame(output)

        # Corrections to enable DataFrame comparison.
        if orient == "values":
            df.columns = [0, 1, 2]
            df.index = [0, 1]
        elif orient == "records":
            df.index = [0, 1]
        elif orient == "index":
            df = df.transpose()

        assert (df.dtypes == dtype).all()
        tm.assert_frame_equal(output, df)

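    # A dict whose values are DataFrames should encode exactly as if each
    # frame had been encoded on its own with the same orient.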
    def test_dataframe_nested(self, orient):
        df = DataFrame(
            [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"]
        )

        nested = {"df1": df, "df2": df.copy()}
        kwargs = {} if orient is None else {"orient": orient}

        exp = {
            "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
            "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
        }
        assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp

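    # Series round trips are lossy for most orients: only "split" preserves
    # both name and index, so the test patches name/index on the expected
    # Series before the final comparison.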
    def test_series(self, orient):
        dtype = np.int64
        s = Series(
            [10, 20, 30, 40, 50, 60],
            name="series",
            index=[6, 7, 8, 9, 10, 15],
            dtype=dtype,
        ).sort_values()
        assert s.dtype == dtype

        encode_kwargs = {} if orient is None else {"orient": orient}

        output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs))
        assert s.dtype == dtype

        if orient == "split":
            dec = _clean_dict(output)
            output = Series(**dec)
        else:
            output = Series(output)

        if orient in (None, "index"):
            s.name = None
            output = output.sort_values()
            s.index = ["6", "7", "8", "9", "10", "15"]
        elif orient in ("records", "values"):
            s.name = None
            s.index = [0, 1, 2, 3, 4, 5]

        assert s.dtype == dtype
        tm.assert_series_equal(output, s)

    def test_series_nested(self, orient):
        s = Series(
            [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15]
        ).sort_values()
        nested = {"s1": s, "s2": s.copy()}
        kwargs = {} if orient is None else {"orient": orient}

        exp = {
            "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
            "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
        }
        assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp

    def test_index(self):
        i = Index([23, 45, 18, 98, 43, 11], name="index")

        # Column indexed.
        output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index")
        tm.assert_index_equal(i, output)

        dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split")))
        output = Index(**dec)

        tm.assert_index_equal(i, output)
        assert i.name == output.name

        output = Index(
            ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index"
        )
        tm.assert_index_equal(i, output)

        output = Index(
            ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index"
        )
        tm.assert_index_equal(i, output)

        output = Index(
            ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index"
        )
        tm.assert_index_equal(i, output)

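    # With date_unit="ns", datetimes are encoded as integer epoch nanoseconds,
    # so the decoded values must be rebuilt into a DatetimeIndex by hand.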
    def test_datetime_index(self):
        date_unit = "ns"

        # freq doesn't round-trip
        rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None)
        encoded = ujson.ujson_dumps(rng, date_unit=date_unit)

        decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded)))
        tm.assert_index_equal(rng, decoded)

        ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
        decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit)))

        idx_values = decoded.index.values.astype(np.int64)
        decoded.index = DatetimeIndex(idx_values)
        tm.assert_series_equal(ts, decoded)

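    # Decoder edge cases: malformed documents must raise ValueError rather
    # than silently yielding a value.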
    @pytest.mark.parametrize(
        "invalid_arr",
        [
            "[31337,]",  # Trailing comma.
            "[,31337]",  # Leading comma.
            "[]]",  # Unmatched bracket.
            "[,]",  # Only comma.
        ],
    )
    def test_decode_invalid_array(self, invalid_arr):
        msg = (
            "Expected object or value|Trailing data|"
            "Unexpected character found when decoding array value"
        )
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_loads(invalid_arr)

    @pytest.mark.parametrize("arr", [[], [31337]])
    def test_decode_array(self, arr):
        assert arr == ujson.ujson_loads(str(arr))

    @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808])
    def test_decode_extreme_numbers(self, extreme_num):
        assert extreme_num == ujson.ujson_loads(str(extreme_num))

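    # Integers are parsed into 64-bit storage, so values just outside
    # [-2**63, 2**64 - 1] must be rejected.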
@pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"])
|
999 |
+
def test_decode_too_extreme_numbers(self, too_extreme_num):
|
1000 |
+
with pytest.raises(
|
1001 |
+
ValueError,
|
1002 |
+
match="Value is too big|Value is too small",
|
1003 |
+
):
|
1004 |
+
ujson.ujson_loads(too_extreme_num)
|
1005 |
+
|
1006 |
+
def test_decode_with_trailing_whitespaces(self):
|
1007 |
+
assert {} == ujson.ujson_loads("{}\n\t ")
|
1008 |
+
|
1009 |
+
def test_decode_with_trailing_non_whitespaces(self):
|
1010 |
+
with pytest.raises(ValueError, match="Trailing data"):
|
1011 |
+
ujson.ujson_loads("{}\n\t a")
|
1012 |
+
|
1013 |
+
@pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"])
|
1014 |
+
def test_decode_array_with_big_int(self, value):
|
1015 |
+
with pytest.raises(
|
1016 |
+
ValueError,
|
1017 |
+
match="Value is too big|Value is too small",
|
1018 |
+
):
|
1019 |
+
ujson.ujson_loads(value)
|
1020 |
+
|
1021 |
+
@pytest.mark.parametrize(
|
1022 |
+
"float_number",
|
1023 |
+
[
|
1024 |
+
1.1234567893,
|
1025 |
+
1.234567893,
|
1026 |
+
1.34567893,
|
1027 |
+
1.4567893,
|
1028 |
+
1.567893,
|
1029 |
+
1.67893,
|
1030 |
+
1.7893,
|
1031 |
+
1.893,
|
1032 |
+
1.3,
|
1033 |
+
],
|
1034 |
+
)
|
1035 |
+
@pytest.mark.parametrize("sign", [-1, 1])
|
1036 |
+
def test_decode_floating_point(self, sign, float_number):
|
1037 |
+
float_number *= sign
|
1038 |
+
tm.assert_almost_equal(
|
1039 |
+
float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15
|
1040 |
+
)
|
1041 |
+
|
1042 |
+
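    # Sets have no JSON equivalent and are encoded as arrays; ordering is not
    # guaranteed, so the round trip only checks membership.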
    def test_encode_big_set(self):
        s = set()

        for x in range(100000):
            s.add(x)

        # Make sure no Exception is raised.
        ujson.ujson_dumps(s)

    def test_encode_empty_set(self):
        assert "[]" == ujson.ujson_dumps(set())

    def test_encode_set(self):
        s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
        enc = ujson.ujson_dumps(s)
        dec = ujson.ujson_loads(enc)

        for v in dec:
            assert v in s

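    # With iso_dates=True a Timedelta is encoded as a quoted ISO-8601 duration
    # matching Timedelta.isoformat(), e.g. '"P1DT0H0M0S"' for one day (GH 28256).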
    @pytest.mark.parametrize(
        "td",
        [
            Timedelta(days=366),
            Timedelta(days=-1),
            Timedelta(hours=13, minutes=5, seconds=5),
            Timedelta(hours=13, minutes=20, seconds=30),
            Timedelta(days=-1, nanoseconds=5),
            Timedelta(nanoseconds=1),
            Timedelta(microseconds=1, nanoseconds=1),
            Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),
            Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),
        ],
    )
    def test_encode_timedelta_iso(self, td):
        # GH 28256
        result = ujson.ujson_dumps(td, iso_dates=True)
        expected = f'"{td.isoformat()}"'

        assert result == expected

    def test_encode_periodindex(self):
        # GH 46683
        p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
        df = DataFrame(index=p)
        assert df.to_json() == "{}"

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (192 Bytes)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc
ADDED
Binary file (1.5 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (407 Bytes)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc
ADDED
Binary file (24.2 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc
ADDED
Binary file (4.46 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc
ADDED
Binary file (2.59 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc
ADDED
Binary file (4.91 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc
ADDED
Binary file (8.54 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc
ADDED
Binary file (11.7 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc
ADDED
Binary file (3.38 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc
ADDED
Binary file (10.4 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc
ADDED
Binary file (794 Bytes)