applied-ai-018 committed
Commit 61531fe · verified · 1 Parent(s): 8972ff3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
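For context, upload-large-folder is the huggingface_hub utility for pushing big directory trees (such as this vendored virtualenv) in resumable chunks. A minimal sketch, assuming a hypothetical repo id and local path, of the kind of call that produces a commit like this one:

    from huggingface_hub import HfApi

    api = HfApi()
    # Uploads the folder as a series of commits; files matched by the repo's
    # .gitattributes LFS rules are stored as LFS objects automatically.
    api.upload_large_folder(
        repo_id="applied-ai-018/llmeval-env",  # hypothetical repo id
        repo_type="model",
        folder_path="llmeval-env",             # hypothetical local path
    )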
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 +3 -0
  3. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py +9 -0
  18. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py +130 -0
  19. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py +21 -0
  20. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py +873 -0
  21. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py +317 -0
  22. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py +907 -0
  23. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py +2202 -0
  24. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py +543 -0
  25. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py +1087 -0
  26. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py +50 -0
  50. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py +9 -0
.gitattributes CHANGED
@@ -197,3 +197,4 @@ llmeval-env/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-l
 llmeval-env/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f97bfc2cf75dc40da650eb97aff63d3e195f500cc623c74f3fe33ce2ce2b71f4
+size 515090264
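The three "+" lines above are the entire committed object: Git LFS keeps this small pointer in the repository and stores the 515 MB shared library in blob storage, addressed by the sha256 oid. A minimal sketch, assuming illustrative local paths, of checking a downloaded blob against such a pointer:

    import hashlib
    from pathlib import Path

    def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
        """Check a local blob against the oid/size recorded in an LFS pointer."""
        fields = dict(
            line.split(" ", 1)
            for line in Path(pointer_path).read_text().splitlines()
            if line
        )
        if Path(blob_path).stat().st_size != int(fields["size"]):
            return False
        digest = hashlib.sha256()
        with open(blob_path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == fields["oid"].removeprefix("sha256:")

    # e.g. verify_lfs_pointer("libcublasLt.so.12.pointer", "libcublasLt.so.12")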
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc ADDED
Binary file (11 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc ADDED
Binary file (9.35 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc ADDED
Binary file (4.76 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py ADDED
Empty file.

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (461 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc ADDED
Binary file (4.45 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc ADDED
Binary file (949 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc ADDED
Binary file (23.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc ADDED
Binary file (8.39 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc ADDED
Binary file (20 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc ADDED
Binary file (65.7 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc ADDED
Binary file (15 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc ADDED
Binary file (35.6 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py ADDED
@@ -0,0 +1,9 @@
+import pytest
+
+
+@pytest.fixture(params=["split", "records", "index", "columns", "values"])
+def orient(request):
+    """
+    Fixture for orients excluding the table format.
+    """
+    return request.param
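pytest injects this fixture into any test in the directory that takes an "orient" argument, so each such test runs once per listed orient. A sketch of a consumer (the test body is illustrative, not part of this commit):

    from io import StringIO

    import pandas as pd

    def test_orient_roundtrip(orient):
        # Runs five times: "split", "records", "index", "columns", "values".
        df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        result = pd.read_json(StringIO(df.to_json(orient=orient)), orient=orient)
        # Column/index labels survive only for some orients; compare raw values.
        assert result.to_numpy().tolist() == df.to_numpy().tolist()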
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py ADDED
@@ -0,0 +1,130 @@
+from io import (
+    BytesIO,
+    StringIO,
+)
+
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_compression_roundtrip(compression):
+    df = pd.DataFrame(
+        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
+        index=["A", "B"],
+        columns=["X", "Y", "Z"],
+    )
+
+    with tm.ensure_clean() as path:
+        df.to_json(path, compression=compression)
+        tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
+
+        # explicitly ensure file was compressed.
+        with tm.decompress_file(path, compression) as fh:
+            result = fh.read().decode("utf8")
+        data = StringIO(result)
+        tm.assert_frame_equal(df, pd.read_json(data))
+
+
+def test_read_zipped_json(datapath):
+    uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
+    uncompressed_df = pd.read_json(uncompressed_path)
+
+    compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
+    compressed_df = pd.read_json(compressed_path, compression="zip")
+
+    tm.assert_frame_equal(uncompressed_df, compressed_df)
+
+
+@td.skip_if_not_us_locale
+@pytest.mark.single_cpu
+def test_with_s3_url(compression, s3_public_bucket, s3so):
+    # Bucket created in tests/io/conftest.py
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+
+    with tm.ensure_clean() as path:
+        df.to_json(path, compression=compression)
+        with open(path, "rb") as f:
+            s3_public_bucket.put_object(Key="test-1", Body=f)
+
+    roundtripped_df = pd.read_json(
+        f"s3://{s3_public_bucket.name}/test-1",
+        compression=compression,
+        storage_options=s3so,
+    )
+    tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_lines_with_compression(compression):
+    with tm.ensure_clean() as path:
+        df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+        df.to_json(path, orient="records", lines=True, compression=compression)
+        roundtripped_df = pd.read_json(path, lines=True, compression=compression)
+        tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_chunksize_with_compression(compression):
+    with tm.ensure_clean() as path:
+        df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}'))
+        df.to_json(path, orient="records", lines=True, compression=compression)
+
+        with pd.read_json(
+            path, lines=True, chunksize=1, compression=compression
+        ) as res:
+            roundtripped_df = pd.concat(res)
+        tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_write_unsupported_compression_type():
+    df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+    with tm.ensure_clean() as path:
+        msg = "Unrecognized compression type: unsupported"
+        with pytest.raises(ValueError, match=msg):
+            df.to_json(path, compression="unsupported")
+
+
+def test_read_unsupported_compression_type():
+    with tm.ensure_clean() as path:
+        msg = "Unrecognized compression type: unsupported"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_json(path, compression="unsupported")
+
+
+@pytest.mark.parametrize(
+    "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+@pytest.mark.parametrize("to_infer", [True, False])
+@pytest.mark.parametrize("read_infer", [True, False])
+def test_to_json_compression(
+    compression_only, read_infer, to_infer, compression_to_extension, infer_string
+):
+    with pd.option_context("future.infer_string", infer_string):
+        # see gh-15008
+        compression = compression_only
+
+        # We'll complete file extension subsequently.
+        filename = "test."
+        filename += compression_to_extension[compression]
+
+        df = pd.DataFrame({"A": [1]})
+
+        to_compression = "infer" if to_infer else compression
+        read_compression = "infer" if read_infer else compression
+
+        with tm.ensure_clean(filename) as path:
+            df.to_json(path, compression=to_compression)
+            result = pd.read_json(path, compression=read_compression)
+            tm.assert_frame_equal(result, df)
+
+
+def test_to_json_compression_mode(compression):
+    # GH 39985 (read_json does not support user-provided binary files)
+    expected = pd.DataFrame({"A": [1]})
+
+    with BytesIO() as buffer:
+        expected.to_json(buffer, compression=compression)
+        # df = pd.read_json(buffer, compression=compression)
+        # tm.assert_frame_equal(expected, df)
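Outside the test harness, the roundtrip that test_compression_roundtrip exercises reduces to the public compression parameters of to_json/read_json (the file name here is illustrative):

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    # compression="infer" (the default) picks gzip from the ".gz" suffix,
    # and read_json infers the codec from the suffix the same way.
    df.to_json("frame.json.gz", compression="infer")
    assert pd.read_json("frame.json.gz").equals(df)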
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py ADDED
@@ -0,0 +1,21 @@
+"""
+Tests for the deprecated keyword arguments for `read_json`.
+"""
+from io import StringIO
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.json import read_json
+
+
+def test_good_kwargs():
+    df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])
+
+    with tm.assert_produces_warning(None):
+        data1 = StringIO(df.to_json(orient="split"))
+        tm.assert_frame_equal(df, read_json(data1, orient="split"))
+        data2 = StringIO(df.to_json(orient="columns"))
+        tm.assert_frame_equal(df, read_json(data2, orient="columns"))
+        data3 = StringIO(df.to_json(orient="index"))
+        tm.assert_frame_equal(df, read_json(data3, orient="index"))
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py ADDED
@@ -0,0 +1,873 @@
+"""Tests for Table Schema integration."""
+from collections import OrderedDict
+from io import StringIO
+import json
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.dtypes import (
+    CategoricalDtype,
+    DatetimeTZDtype,
+    PeriodDtype,
+)
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.json._table_schema import (
+    as_json_table_type,
+    build_table_schema,
+    convert_json_field_to_pandas_type,
+    convert_pandas_type_to_json_field,
+    set_default_names,
+)
+
+
+@pytest.fixture
+def df_schema():
+    return DataFrame(
+        {
+            "A": [1, 2, 3, 4],
+            "B": ["a", "b", "c", "c"],
+            "C": pd.date_range("2016-01-01", freq="d", periods=4),
+            "D": pd.timedelta_range("1h", periods=4, freq="min"),
+        },
+        index=pd.Index(range(4), name="idx"),
+    )
+
+
+@pytest.fixture
+def df_table():
+    return DataFrame(
+        {
+            "A": [1, 2, 3, 4],
+            "B": ["a", "b", "c", "c"],
+            "C": pd.date_range("2016-01-01", freq="d", periods=4),
+            "D": pd.timedelta_range("1h", periods=4, freq="min"),
+            "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
+            "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
+            "G": [1.0, 2.0, 3, 4.0],
+            "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
+        },
+        index=pd.Index(range(4), name="idx"),
+    )
+
+
+class TestBuildSchema:
+    def test_build_table_schema(self, df_schema, using_infer_string):
+        result = build_table_schema(df_schema, version=False)
+        expected = {
+            "fields": [
+                {"name": "idx", "type": "integer"},
+                {"name": "A", "type": "integer"},
+                {"name": "B", "type": "string"},
+                {"name": "C", "type": "datetime"},
+                {"name": "D", "type": "duration"},
+            ],
+            "primaryKey": ["idx"],
+        }
+        if using_infer_string:
+            expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"}
+        assert result == expected
+        result = build_table_schema(df_schema)
+        assert "pandas_version" in result
+
+    def test_series(self):
+        s = pd.Series([1, 2, 3], name="foo")
+        result = build_table_schema(s, version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "foo", "type": "integer"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+        result = build_table_schema(s)
+        assert "pandas_version" in result
+
+    def test_series_unnamed(self):
+        result = build_table_schema(pd.Series([1, 2, 3]), version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "values", "type": "integer"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+
+    def test_multiindex(self, df_schema, using_infer_string):
+        df = df_schema
+        idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])
+        df.index = idx
+
+        result = build_table_schema(df, version=False)
+        expected = {
+            "fields": [
+                {"name": "level_0", "type": "string"},
+                {"name": "level_1", "type": "integer"},
+                {"name": "A", "type": "integer"},
+                {"name": "B", "type": "string"},
+                {"name": "C", "type": "datetime"},
+                {"name": "D", "type": "duration"},
+            ],
+            "primaryKey": ["level_0", "level_1"],
+        }
+        if using_infer_string:
+            expected["fields"][0] = {
+                "name": "level_0",
+                "type": "any",
+                "extDtype": "string",
+            }
+            expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"}
+        assert result == expected
+
+        df.index.names = ["idx0", None]
+        expected["fields"][0]["name"] = "idx0"
+        expected["primaryKey"] = ["idx0", "level_1"]
+        result = build_table_schema(df, version=False)
+        assert result == expected
+
+
+class TestTableSchemaType:
+    @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64])
+    def test_as_json_table_type_int_data(self, int_type):
+        int_data = [1, 2, 3]
+        assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer"
+
+    @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64])
+    def test_as_json_table_type_float_data(self, float_type):
+        float_data = [1.0, 2.0, 3.0]
+        assert (
+            as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number"
+        )
+
+    @pytest.mark.parametrize("bool_type", [bool, np.bool_])
+    def test_as_json_table_type_bool_data(self, bool_type):
+        bool_data = [True, False]
+        assert (
+            as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean"
+        )
+
+    @pytest.mark.parametrize(
+        "date_data",
+        [
+            pd.to_datetime(["2016"]),
+            pd.to_datetime(["2016"], utc=True),
+            pd.Series(pd.to_datetime(["2016"])),
+            pd.Series(pd.to_datetime(["2016"], utc=True)),
+            pd.period_range("2016", freq="Y", periods=3),
+        ],
+    )
+    def test_as_json_table_type_date_data(self, date_data):
+        assert as_json_table_type(date_data.dtype) == "datetime"
+
+    @pytest.mark.parametrize(
+        "str_data",
+        [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)],
+    )
+    def test_as_json_table_type_string_data(self, str_data):
+        assert as_json_table_type(str_data.dtype) == "string"
+
+    @pytest.mark.parametrize(
+        "cat_data",
+        [
+            pd.Categorical(["a"]),
+            pd.Categorical([1]),
+            pd.Series(pd.Categorical([1])),
+            pd.CategoricalIndex([1]),
+            pd.Categorical([1]),
+        ],
+    )
+    def test_as_json_table_type_categorical_data(self, cat_data):
+        assert as_json_table_type(cat_data.dtype) == "any"
+
+    # ------
+    # dtypes
+    # ------
+    @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64])
+    def test_as_json_table_type_int_dtypes(self, int_dtype):
+        assert as_json_table_type(int_dtype) == "integer"
+
+    @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64])
+    def test_as_json_table_type_float_dtypes(self, float_dtype):
+        assert as_json_table_type(float_dtype) == "number"
+
+    @pytest.mark.parametrize("bool_dtype", [bool, np.bool_])
+    def test_as_json_table_type_bool_dtypes(self, bool_dtype):
+        assert as_json_table_type(bool_dtype) == "boolean"
+
+    @pytest.mark.parametrize(
+        "date_dtype",
+        [
+            np.dtype("<M8[ns]"),
+            PeriodDtype("D"),
+            DatetimeTZDtype("ns", "US/Central"),
+        ],
+    )
+    def test_as_json_table_type_date_dtypes(self, date_dtype):
+        # TODO: datedate.date? datetime.time?
+        assert as_json_table_type(date_dtype) == "datetime"
+
+    @pytest.mark.parametrize("td_dtype", [np.dtype("<m8[ns]")])
+    def test_as_json_table_type_timedelta_dtypes(self, td_dtype):
+        assert as_json_table_type(td_dtype) == "duration"
+
+    @pytest.mark.parametrize("str_dtype", [object])  # TODO(GH#14904) flesh out dtypes?
+    def test_as_json_table_type_string_dtypes(self, str_dtype):
+        assert as_json_table_type(str_dtype) == "string"
+
+    def test_as_json_table_type_categorical_dtypes(self):
+        assert as_json_table_type(pd.Categorical(["a"]).dtype) == "any"
+        assert as_json_table_type(CategoricalDtype()) == "any"
+
+
+class TestTableOrient:
+    def test_build_series(self):
+        s = pd.Series([1, 2], name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [{"name": "id", "type": "integer"}, {"name": "a", "type": "integer"}]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                (
+                    "data",
+                    [
+                        OrderedDict([("id", 0), ("a", 1)]),
+                        OrderedDict([("id", 1), ("a", 2)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_read_json_from_to_json_results(self):
+        # GH32383
+        df = DataFrame(
+            {
+                "_id": {"row_0": 0},
+                "category": {"row_0": "Goods"},
+                "recommender_id": {"row_0": 3},
+                "recommender_name_jp": {"row_0": "浦田"},
+                "recommender_name_en": {"row_0": "Urata"},
+                "name_jp": {"row_0": "博多人形(松尾吉将まつお よしまさ)"},
+                "name_en": {"row_0": "Hakata Dolls Matsuo"},
+            }
+        )
+
+        result1 = pd.read_json(StringIO(df.to_json()))
+        result2 = DataFrame.from_dict(json.loads(df.to_json()))
+        tm.assert_frame_equal(result1, df)
+        tm.assert_frame_equal(result2, df)
+
+    def test_to_json(self, df_table, using_infer_string):
+        df = df_table
+        df.index.name = "idx"
+        result = df.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "idx", "type": "integer"},
+            {"name": "A", "type": "integer"},
+            {"name": "B", "type": "string"},
+            {"name": "C", "type": "datetime"},
+            {"name": "D", "type": "duration"},
+            {
+                "constraints": {"enum": ["a", "b", "c"]},
+                "name": "E",
+                "ordered": False,
+                "type": "any",
+            },
+            {
+                "constraints": {"enum": ["a", "b", "c"]},
+                "name": "F",
+                "ordered": True,
+                "type": "any",
+            },
+            {"name": "G", "type": "number"},
+            {"name": "H", "type": "datetime", "tz": "US/Central"},
+        ]
+
+        if using_infer_string:
+            fields[2] = {"name": "B", "type": "any", "extDtype": "string"}
+
+        schema = {"fields": fields, "primaryKey": ["idx"]}
+        data = [
+            OrderedDict(
+                [
+                    ("idx", 0),
+                    ("A", 1),
+                    ("B", "a"),
+                    ("C", "2016-01-01T00:00:00.000"),
+                    ("D", "P0DT1H0M0S"),
+                    ("E", "a"),
+                    ("F", "a"),
+                    ("G", 1.0),
+                    ("H", "2016-01-01T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 1),
+                    ("A", 2),
+                    ("B", "b"),
+                    ("C", "2016-01-02T00:00:00.000"),
+                    ("D", "P0DT1H1M0S"),
+                    ("E", "b"),
+                    ("F", "b"),
+                    ("G", 2.0),
+                    ("H", "2016-01-02T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 2),
+                    ("A", 3),
+                    ("B", "c"),
+                    ("C", "2016-01-03T00:00:00.000"),
+                    ("D", "P0DT1H2M0S"),
+                    ("E", "c"),
+                    ("F", "c"),
+                    ("G", 3.0),
+                    ("H", "2016-01-03T06:00:00.000Z"),
+                ]
+            ),
+            OrderedDict(
+                [
+                    ("idx", 3),
+                    ("A", 4),
+                    ("B", "c"),
+                    ("C", "2016-01-04T00:00:00.000"),
+                    ("D", "P0DT1H3M0S"),
+                    ("E", "c"),
+                    ("F", "c"),
+                    ("G", 4.0),
+                    ("H", "2016-01-04T06:00:00.000Z"),
+                ]
+            ),
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_to_json_float_index(self):
+        data = pd.Series(1, index=[1.0, 2.0])
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        expected = OrderedDict(
+            [
+                (
+                    "schema",
+                    {
+                        "fields": [
+                            {"name": "index", "type": "number"},
+                            {"name": "values", "type": "integer"},
+                        ],
+                        "primaryKey": ["index"],
+                    },
+                ),
+                (
+                    "data",
+                    [
+                        OrderedDict([("index", 1.0), ("values", 1)]),
+                        OrderedDict([("index", 2.0), ("values", 1)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_to_json_period_index(self):
+        idx = pd.period_range("2016", freq="Q-JAN", periods=2)
+        data = pd.Series(1, idx)
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"freq": "QE-JAN", "name": "index", "type": "datetime"},
+            {"name": "values", "type": "integer"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["index"]}
+        data = [
+            OrderedDict([("index", "2015-11-01T00:00:00.000"), ("values", 1)]),
+            OrderedDict([("index", "2016-02-01T00:00:00.000"), ("values", 1)]),
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_to_json_categorical_index(self):
+        data = pd.Series(1, pd.CategoricalIndex(["a", "b"]))
+        result = data.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        expected = OrderedDict(
+            [
+                (
+                    "schema",
+                    {
+                        "fields": [
+                            {
+                                "name": "index",
+                                "type": "any",
+                                "constraints": {"enum": ["a", "b"]},
+                                "ordered": False,
+                            },
+                            {"name": "values", "type": "integer"},
+                        ],
+                        "primaryKey": ["index"],
+                    },
+                ),
+                (
+                    "data",
+                    [
+                        OrderedDict([("index", "a"), ("values", 1)]),
+                        OrderedDict([("index", "b"), ("values", 1)]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    def test_date_format_raises(self, df_table):
+        msg = (
+            "Trying to write with `orient='table'` and `date_format='epoch'`. Table "
+            "Schema requires dates to be formatted with `date_format='iso'`"
+        )
+        with pytest.raises(ValueError, match=msg):
+            df_table.to_json(orient="table", date_format="epoch")
+
+        # others work
+        df_table.to_json(orient="table", date_format="iso")
+        df_table.to_json(orient="table")
+
+    def test_convert_pandas_type_to_json_field_int(self, index_or_series):
+        kind = index_or_series
+        data = [1, 2, 3]
+        result = convert_pandas_type_to_json_field(kind(data, name="name"))
+        expected = {"name": "name", "type": "integer"}
+        assert result == expected
+
+    def test_convert_pandas_type_to_json_field_float(self, index_or_series):
+        kind = index_or_series
+        data = [1.0, 2.0, 3.0]
+        result = convert_pandas_type_to_json_field(kind(data, name="name"))
+        expected = {"name": "name", "type": "number"}
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "dt_args,extra_exp", [({}, {}), ({"utc": True}, {"tz": "UTC"})]
+    )
+    @pytest.mark.parametrize("wrapper", [None, pd.Series])
+    def test_convert_pandas_type_to_json_field_datetime(
+        self, dt_args, extra_exp, wrapper
+    ):
+        data = [1.0, 2.0, 3.0]
+        data = pd.to_datetime(data, **dt_args)
+        if wrapper is pd.Series:
+            data = pd.Series(data, name="values")
+        result = convert_pandas_type_to_json_field(data)
+        expected = {"name": "values", "type": "datetime"}
+        expected.update(extra_exp)
+        assert result == expected
+
+    def test_convert_pandas_type_to_json_period_range(self):
+        arr = pd.period_range("2016", freq="Y-DEC", periods=4)
+        result = convert_pandas_type_to_json_field(arr)
+        expected = {"name": "values", "type": "datetime", "freq": "YE-DEC"}
+        assert result == expected
+
+    @pytest.mark.parametrize("kind", [pd.Categorical, pd.CategoricalIndex])
+    @pytest.mark.parametrize("ordered", [True, False])
+    def test_convert_pandas_type_to_json_field_categorical(self, kind, ordered):
+        data = ["a", "b", "c"]
+        if kind is pd.Categorical:
+            arr = pd.Series(kind(data, ordered=ordered), name="cats")
+        elif kind is pd.CategoricalIndex:
+            arr = kind(data, ordered=ordered, name="cats")
+
+        result = convert_pandas_type_to_json_field(arr)
+        expected = {
+            "name": "cats",
+            "type": "any",
+            "constraints": {"enum": data},
+            "ordered": ordered,
+        }
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "inp,exp",
+        [
+            ({"type": "integer"}, "int64"),
+            ({"type": "number"}, "float64"),
+            ({"type": "boolean"}, "bool"),
+            ({"type": "duration"}, "timedelta64"),
+            ({"type": "datetime"}, "datetime64[ns]"),
+            ({"type": "datetime", "tz": "US/Hawaii"}, "datetime64[ns, US/Hawaii]"),
+            ({"type": "any"}, "object"),
+            (
+                {
+                    "type": "any",
+                    "constraints": {"enum": ["a", "b", "c"]},
+                    "ordered": False,
+                },
+                CategoricalDtype(categories=["a", "b", "c"], ordered=False),
+            ),
+            (
+                {
+                    "type": "any",
+                    "constraints": {"enum": ["a", "b", "c"]},
+                    "ordered": True,
+                },
+                CategoricalDtype(categories=["a", "b", "c"], ordered=True),
+            ),
+            ({"type": "string"}, "object"),
+        ],
+    )
+    def test_convert_json_field_to_pandas_type(self, inp, exp):
+        field = {"name": "foo"}
+        field.update(inp)
+        assert convert_json_field_to_pandas_type(field) == exp
+
+    @pytest.mark.parametrize("inp", ["geopoint", "geojson", "fake_type"])
+    def test_convert_json_field_to_pandas_type_raises(self, inp):
+        field = {"type": inp}
+        with pytest.raises(
+            ValueError, match=f"Unsupported or invalid field type: {inp}"
+        ):
+            convert_json_field_to_pandas_type(field)
+
+    def test_categorical(self):
+        s = pd.Series(pd.Categorical(["a", "b", "a"]))
+        s.index.name = "idx"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "idx", "type": "integer"},
+            {
+                "constraints": {"enum": ["a", "b"]},
+                "name": "values",
+                "ordered": False,
+                "type": "any",
+            },
+        ]
+
+        expected = OrderedDict(
+            [
+                ("schema", {"fields": fields, "primaryKey": ["idx"]}),
+                (
+                    "data",
+                    [
+                        OrderedDict([("idx", 0), ("values", "a")]),
+                        OrderedDict([("idx", 1), ("values", "b")]),
+                        OrderedDict([("idx", 2), ("values", "a")]),
+                    ],
+                ),
+            ]
+        )
+
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "idx,nm,prop",
+        [
+            (pd.Index([1]), "index", "name"),
+            (pd.Index([1], name="myname"), "myname", "name"),
+            (
+                pd.MultiIndex.from_product([("a", "b"), ("c", "d")]),
+                ["level_0", "level_1"],
+                "names",
+            ),
+            (
+                pd.MultiIndex.from_product(
+                    [("a", "b"), ("c", "d")], names=["n1", "n2"]
+                ),
+                ["n1", "n2"],
+                "names",
+            ),
+            (
+                pd.MultiIndex.from_product(
+                    [("a", "b"), ("c", "d")], names=["n1", None]
+                ),
+                ["n1", "level_1"],
+                "names",
+            ),
+        ],
+    )
+    def test_set_names_unset(self, idx, nm, prop):
+        data = pd.Series(1, idx)
+        result = set_default_names(data)
+        assert getattr(result.index, prop) == nm
+
+    @pytest.mark.parametrize(
+        "idx",
+        [
+            pd.Index([], name="index"),
+            pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("level_0", "level_1")),
+            pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("foo", "level_1")),
+        ],
+    )
+    def test_warns_non_roundtrippable_names(self, idx):
+        # GH 19130
+        df = DataFrame(index=idx)
+        df.index.name = "index"
+        with tm.assert_produces_warning():
+            set_default_names(df)
+
+    def test_timestamp_in_columns(self):
+        df = DataFrame(
+            [[1, 2]], columns=[pd.Timestamp("2016"), pd.Timedelta(10, unit="s")]
+        )
+        result = df.to_json(orient="table")
+        js = json.loads(result)
+        assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000"
+        assert js["schema"]["fields"][2]["name"] == "P0DT0H0M10S"
+
+    @pytest.mark.parametrize(
+        "case",
+        [
+            pd.Series([1], index=pd.Index([1], name="a"), name="a"),
+            DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
+            DataFrame(
+                {"A": [1]},
+                index=pd.MultiIndex.from_arrays([["a"], [1]], names=["A", "a"]),
+            ),
+        ],
+    )
+    def test_overlapping_names(self, case):
+        with pytest.raises(ValueError, match="Overlapping"):
+            case.to_json(orient="table")
+
+    def test_mi_falsey_name(self):
+        # GH 16203
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((4, 4)),
+            index=pd.MultiIndex.from_product([("A", "B"), ("a", "b")]),
+        )
+        result = [x["name"] for x in build_table_schema(df)["fields"]]
+        assert result == ["level_0", "level_1", 0, 1, 2, 3]
+
+
+class TestTableOrientReader:
+    @pytest.mark.parametrize(
+        "index_nm",
+        [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"ints": [1, 2, 3, 4]},
+            {"objects": ["a", "b", "c", "d"]},
+            {"objects": ["1", "2", "3", "4"]},
+            {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},
+            {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
+            {
+                "ordered_cats": pd.Series(
+                    pd.Categorical(["a", "b", "c", "c"], ordered=True)
+                )
+            },
+            {"floats": [1.0, 2.0, 3.0, 4.0]},
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"bools": [True, False, False, True]},
+            {
+                "timezones": pd.date_range(
+                    "2016-01-01", freq="d", periods=4, tz="US/Central"
+                )  # added in # GH 35973
+            },
+        ],
+    )
+    def test_read_json_table_orient(self, index_nm, vals, recwarn):
+        df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize("index_nm", [None, "idx", "index"])
+    @pytest.mark.parametrize(
+        "vals",
+        [{"timedeltas": pd.timedelta_range("1h", periods=4, freq="min")}],
+    )
+    def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
+        df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+        out = df.to_json(orient="table")
+        with pytest.raises(NotImplementedError, match="can not yet read "):
+            pd.read_json(out, orient="table")
+
+    @pytest.mark.parametrize(
+        "index_nm",
+        [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"ints": [1, 2, 3, 4]},
+            {"objects": ["a", "b", "c", "d"]},
+            {"objects": ["1", "2", "3", "4"]},
+            {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},
+            {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
+            {
+                "ordered_cats": pd.Series(
+                    pd.Categorical(["a", "b", "c", "c"], ordered=True)
+                )
+            },
+            {"floats": [1.0, 2.0, 3.0, 4.0]},
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"bools": [True, False, False, True]},
+            {
+                "timezones": pd.date_range(
+                    "2016-01-01", freq="d", periods=4, tz="US/Central"
+                )  # added in # GH 35973
+            },
+        ],
+    )
+    def test_read_json_table_period_orient(self, index_nm, vals, recwarn):
+        df = DataFrame(
+            vals,
+            index=pd.Index(
+                (pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm
+            ),
+        )
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize(
+        "idx",
+        [
+            pd.Index(range(4)),
+            pd.date_range(
+                "2020-08-30",
+                freq="d",
+                periods=4,
+            )._with_freq(None),
+            pd.date_range(
+                "2020-08-30", freq="d", periods=4, tz="US/Central"
+            )._with_freq(None),
+            pd.MultiIndex.from_product(
+                [
+                    pd.date_range("2020-08-30", freq="d", periods=2, tz="US/Central"),
+                    ["x", "y"],
+                ],
+            ),
+        ],
+    )
+    @pytest.mark.parametrize(
+        "vals",
+        [
+            {"floats": [1.1, 2.2, 3.3, 4.4]},
+            {"dates": pd.date_range("2020-08-30", freq="d", periods=4)},
+            {
+                "timezones": pd.date_range(
+                    "2020-08-30", freq="d", periods=4, tz="Europe/London"
+                )
+            },
+        ],
+    )
+    def test_read_json_table_timezones_orient(self, idx, vals, recwarn):
+        # GH 35973
+        df = DataFrame(vals, index=idx)
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    def test_comprehensive(self):
+        df = DataFrame(
+            {
+                "A": [1, 2, 3, 4],
+                "B": ["a", "b", "c", "c"],
+                "C": pd.date_range("2016-01-01", freq="d", periods=4),
+                # 'D': pd.timedelta_range('1h', periods=4, freq='min'),
+                "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
+                "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
+                "G": [1.1, 2.2, 3.3, 4.4],
+                "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
+                "I": [True, False, False, True],
+            },
+            index=pd.Index(range(4), name="idx"),
+        )
+
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize(
+        "index_names",
+        [[None, None], ["foo", "bar"], ["foo", None], [None, "foo"], ["index", "foo"]],
+    )
+    def test_multiindex(self, index_names):
+        # GH 18912
+        df = DataFrame(
+            [["Arr", "alpha", [1, 2, 3, 4]], ["Bee", "Beta", [10, 20, 30, 40]]],
+            index=[["A", "B"], ["Null", "Eins"]],
+            columns=["Aussprache", "Griechisch", "Args"],
+        )
+        df.index.names = index_names
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
+
+    def test_empty_frame_roundtrip(self):
+        # GH 21287
+        df = DataFrame(columns=["a", "b", "c"])
+        expected = df.copy()
+        out = StringIO(df.to_json(orient="table"))
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(expected, result)
+
+    def test_read_json_orient_table_old_schema_version(self):
+        df_json = """
+        {
+            "schema":{
+                "fields":[
+                    {"name":"index","type":"integer"},
+                    {"name":"a","type":"string"}
+                ],
+                "primaryKey":["index"],
+                "pandas_version":"0.20.0"
+            },
+            "data":[
+                {"index":0,"a":1},
+                {"index":1,"a":2.0},
+                {"index":2,"a":"s"}
+            ]
+        }
+        """
+        expected = DataFrame({"a": [1, 2.0, "s"]})
+        result = pd.read_json(StringIO(df_json), orient="table")
+        tm.assert_frame_equal(expected, result)
+
+    @pytest.mark.parametrize("freq", ["M", "2M", "Q", "2Q", "Y", "2Y"])
+    def test_read_json_table_orient_period_depr_freq(self, freq, recwarn):
+        # GH#9586
+        df = DataFrame(
+            {"ints": [1, 2]},
+            index=pd.PeriodIndex(["2020-01", "2021-06"], freq=freq),
+        )
+        out = df.to_json(orient="table")
+        result = pd.read_json(out, orient="table")
+        tm.assert_frame_equal(df, result)
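The reader tests above all follow one pattern: write with orient="table", which embeds a Table Schema next to the records, then read it back with the same orient. A minimal sketch of that roundtrip outside the test suite:

    from io import StringIO

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2]}, index=pd.Index([10, 20], name="idx"))
    payload = df.to_json(orient="table")  # {"schema": {...}, "data": [...]}
    result = pd.read_json(StringIO(payload), orient="table")
    assert result.equals(df)  # dtypes and the named index come back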
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py ADDED
@@ -0,0 +1,317 @@
+"""Tests for ExtensionDtype Table Schema integration."""
+
+from collections import OrderedDict
+import datetime as dt
+import decimal
+from io import StringIO
+import json
+
+import pytest
+
+from pandas import (
+    NA,
+    DataFrame,
+    Index,
+    array,
+    read_json,
+)
+import pandas._testing as tm
+from pandas.core.arrays.integer import Int64Dtype
+from pandas.core.arrays.string_ import StringDtype
+from pandas.core.series import Series
+from pandas.tests.extension.date import (
+    DateArray,
+    DateDtype,
+)
+from pandas.tests.extension.decimal.array import (
+    DecimalArray,
+    DecimalDtype,
+)
+
+from pandas.io.json._table_schema import (
+    as_json_table_type,
+    build_table_schema,
+)
+
+
+class TestBuildSchema:
+    def test_build_table_schema(self):
+        df = DataFrame(
+            {
+                "A": DateArray([dt.date(2021, 10, 10)]),
+                "B": DecimalArray([decimal.Decimal(10)]),
+                "C": array(["pandas"], dtype="string"),
+                "D": array([10], dtype="Int64"),
+            }
+        )
+        result = build_table_schema(df, version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "A", "type": "any", "extDtype": "DateDtype"},
+                {"name": "B", "type": "number", "extDtype": "decimal"},
+                {"name": "C", "type": "any", "extDtype": "string"},
+                {"name": "D", "type": "integer", "extDtype": "Int64"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+        result = build_table_schema(df)
+        assert "pandas_version" in result
+
+
+class TestTableSchemaType:
+    @pytest.mark.parametrize(
+        "date_data",
+        [
+            DateArray([dt.date(2021, 10, 10)]),
+            DateArray(dt.date(2021, 10, 10)),
+            Series(DateArray(dt.date(2021, 10, 10))),
+        ],
+    )
+    def test_as_json_table_type_ext_date_array_dtype(self, date_data):
+        assert as_json_table_type(date_data.dtype) == "any"
+
+    def test_as_json_table_type_ext_date_dtype(self):
+        assert as_json_table_type(DateDtype()) == "any"
+
+    @pytest.mark.parametrize(
+        "decimal_data",
+        [
+            DecimalArray([decimal.Decimal(10)]),
+            Series(DecimalArray([decimal.Decimal(10)])),
+        ],
+    )
+    def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):
+        assert as_json_table_type(decimal_data.dtype) == "number"
+
+    def test_as_json_table_type_ext_decimal_dtype(self):
+        assert as_json_table_type(DecimalDtype()) == "number"
+
+    @pytest.mark.parametrize(
+        "string_data",
+        [
+            array(["pandas"], dtype="string"),
+            Series(array(["pandas"], dtype="string")),
+        ],
+    )
+    def test_as_json_table_type_ext_string_array_dtype(self, string_data):
+        assert as_json_table_type(string_data.dtype) == "any"
+
+    def test_as_json_table_type_ext_string_dtype(self):
+        assert as_json_table_type(StringDtype()) == "any"
+
+    @pytest.mark.parametrize(
+        "integer_data",
+        [
+            array([10], dtype="Int64"),
+            Series(array([10], dtype="Int64")),
+        ],
+    )
+    def test_as_json_table_type_ext_integer_array_dtype(self, integer_data):
+        assert as_json_table_type(integer_data.dtype) == "integer"
+
+    def test_as_json_table_type_ext_integer_dtype(self):
+        assert as_json_table_type(Int64Dtype()) == "integer"
+
+
+class TestTableOrient:
+    @pytest.fixture
+    def da(self):
+        return DateArray([dt.date(2021, 10, 10)])
+
+    @pytest.fixture
+    def dc(self):
+        return DecimalArray([decimal.Decimal(10)])
+
+    @pytest.fixture
+    def sa(self):
+        return array(["pandas"], dtype="string")
+
+    @pytest.fixture
+    def ia(self):
+        return array([10], dtype="Int64")
+
+    @pytest.fixture
+    def df(self, da, dc, sa, ia):
+        return DataFrame(
+            {
+                "A": da,
+                "B": dc,
+                "C": sa,
+                "D": ia,
+            }
+        )
+
+    def test_build_date_series(self, da):
+        s = Series(da, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "any", "extDtype": "DateDtype"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_decimal_series(self, dc):
+        s = Series(dc, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "number", "extDtype": "decimal"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", 10.0)])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_string_series(self, sa):
+        s = Series(sa, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "any", "extDtype": "string"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", "pandas")])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_int64_series(self, ia):
+        s = Series(ia, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "integer", "extDtype": "Int64"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", 10)])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_to_json(self, df):
+        df = df.copy()
+        df.index.name = "idx"
+        result = df.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            OrderedDict({"name": "idx", "type": "integer"}),
+            OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}),
+            OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}),
+            OrderedDict({"name": "C", "type": "any", "extDtype": "string"}),
+            OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}),
+        ]
+
+        schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]})
+        data = [
+            OrderedDict(
+                [
+                    ("idx", 0),
+                    ("A", "2021-10-10T00:00:00.000"),
+                    ("B", 10.0),
+                    ("C", "pandas"),
+                    ("D", 10),
+                ]
+            )
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_json_ext_dtype_reading_roundtrip(self):
+        # GH#40255
+        df = DataFrame(
+            {
+                "a": Series([2, NA], dtype="Int64"),
+                "b": Series([1.5, NA], dtype="Float64"),
+                "c": Series([True, NA], dtype="boolean"),
+            },
+            index=Index([1, NA], dtype="Int64"),
+        )
+        expected = df.copy()
+        data_json = df.to_json(orient="table", indent=4)
+        result = read_json(StringIO(data_json), orient="table")
+        tm.assert_frame_equal(result, expected)
+
+    def test_json_ext_dtype_reading(self):
+        # GH#40255
+        data_json = """{
+            "schema":{
+                "fields":[
+                    {
+                        "name":"a",
+                        "type":"integer",
+                        "extDtype":"Int64"
+                    }
+                ],
+            },
+            "data":[
+                {
+                    "a":2
+                },
+                {
+                    "a":null
+                }
+            ]
+        }"""
+        result = read_json(StringIO(data_json), orient="table")
+        expected = DataFrame({"a": Series([2, NA], dtype="Int64")})
+        tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py ADDED
@@ -0,0 +1,907 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import json
+
+ import numpy as np
+ import pytest
+
+ from pandas import (
+     DataFrame,
+     Index,
+     Series,
+     json_normalize,
+ )
+ import pandas._testing as tm
+
+ from pandas.io.json._normalize import nested_to_record
+
+
+ @pytest.fixture
+ def deep_nested():
+     # deeply nested data
+     return [
+         {
+             "country": "USA",
+             "states": [
+                 {
+                     "name": "California",
+                     "cities": [
+                         {"name": "San Francisco", "pop": 12345},
+                         {"name": "Los Angeles", "pop": 12346},
+                     ],
+                 },
+                 {
+                     "name": "Ohio",
+                     "cities": [
+                         {"name": "Columbus", "pop": 1234},
+                         {"name": "Cleveland", "pop": 1236},
+                     ],
+                 },
+             ],
+         },
+         {
+             "country": "Germany",
+             "states": [
+                 {"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
+                 {
+                     "name": "Nordrhein-Westfalen",
+                     "cities": [
+                         {"name": "Duesseldorf", "pop": 1238},
+                         {"name": "Koeln", "pop": 1239},
+                     ],
+                 },
+             ],
+         },
+     ]
+
+
+ @pytest.fixture
+ def state_data():
+     return [
+         {
+             "counties": [
+                 {"name": "Dade", "population": 12345},
+                 {"name": "Broward", "population": 40000},
+                 {"name": "Palm Beach", "population": 60000},
+             ],
+             "info": {"governor": "Rick Scott"},
+             "shortname": "FL",
+             "state": "Florida",
+         },
+         {
+             "counties": [
+                 {"name": "Summit", "population": 1234},
+                 {"name": "Cuyahoga", "population": 1337},
+             ],
+             "info": {"governor": "John Kasich"},
+             "shortname": "OH",
+             "state": "Ohio",
+         },
+     ]
+
+
+ @pytest.fixture
+ def author_missing_data():
+     return [
+         {"info": None},
+         {
+             "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
+             "author_name": {"first": "Jane", "last_name": "Doe"},
+         },
+     ]
+
+
+ @pytest.fixture
+ def missing_metadata():
+     return [
+         {
+             "name": "Alice",
+             "addresses": [
+                 {
+                     "number": 9562,
+                     "street": "Morris St.",
+                     "city": "Massillon",
+                     "state": "OH",
+                     "zip": 44646,
+                 }
+             ],
+             "previous_residences": {"cities": [{"city_name": "Foo York City"}]},
+         },
+         {
+             "addresses": [
+                 {
+                     "number": 8449,
+                     "street": "Spring St.",
+                     "city": "Elizabethton",
+                     "state": "TN",
+                     "zip": 37643,
+                 }
+             ],
+             "previous_residences": {"cities": [{"city_name": "Barmingham"}]},
+         },
+     ]
+
+
+ @pytest.fixture
+ def max_level_test_input_data():
+     """
+     input data to test json_normalize with max_level param
+     """
+     return [
+         {
+             "CreatedBy": {"Name": "User001"},
+             "Lookup": {
+                 "TextField": "Some text",
+                 "UserField": {"Id": "ID001", "Name": "Name001"},
+             },
+             "Image": {"a": "b"},
+         }
+     ]
+
+
+ class TestJSONNormalize:
+     def test_simple_records(self):
+         recs = [
+             {"a": 1, "b": 2, "c": 3},
+             {"a": 4, "b": 5, "c": 6},
+             {"a": 7, "b": 8, "c": 9},
+             {"a": 10, "b": 11, "c": 12},
+         ]
+
+         result = json_normalize(recs)
+         expected = DataFrame(recs)
+
+         tm.assert_frame_equal(result, expected)
+
+     def test_simple_normalize(self, state_data):
+         result = json_normalize(state_data[0], "counties")
+         expected = DataFrame(state_data[0]["counties"])
+         tm.assert_frame_equal(result, expected)
+
+         result = json_normalize(state_data, "counties")
+
+         expected = []
+         for rec in state_data:
+             expected.extend(rec["counties"])
+         expected = DataFrame(expected)
+
+         tm.assert_frame_equal(result, expected)
+
+         result = json_normalize(state_data, "counties", meta="state")
+         expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
+
+         tm.assert_frame_equal(result, expected)
+
+     def test_fields_list_type_normalize(self):
+         parse_metadata_fields_list_type = [
+             {"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}}
+         ]
+         result = json_normalize(
+             parse_metadata_fields_list_type,
+             record_path=["values"],
+             meta=[["metadata", "listdata"]],
+         )
+         expected = DataFrame(
+             {0: [1, 2, 3], "metadata.listdata": [[1, 2], [1, 2], [1, 2]]}
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_empty_array(self):
+         result = json_normalize([])
+         expected = DataFrame()
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "data, record_path, exception_type",
+         [
+             ([{"a": 0}, {"a": 1}], None, None),
+             ({"a": [{"a": 0}, {"a": 1}]}, "a", None),
+             ('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError),
+             (None, None, NotImplementedError),
+         ],
+     )
+     def test_accepted_input(self, data, record_path, exception_type):
+         if exception_type is not None:
+             with pytest.raises(exception_type, match=""):
+                 json_normalize(data, record_path=record_path)
+         else:
+             result = json_normalize(data, record_path=record_path)
+             expected = DataFrame([0, 1], columns=["a"])
+             tm.assert_frame_equal(result, expected)
+
+     def test_simple_normalize_with_separator(self, deep_nested):
+         # GH 14883
+         result = json_normalize({"A": {"A": 1, "B": 2}})
+         expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
+         tm.assert_frame_equal(result.reindex_like(expected), expected)
+
+         result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
+         expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
+         tm.assert_frame_equal(result.reindex_like(expected), expected)
+
+         result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
+         expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
+         tm.assert_frame_equal(result.reindex_like(expected), expected)
+
+         result = json_normalize(
+             deep_nested,
+             ["states", "cities"],
+             meta=["country", ["states", "name"]],
+             sep="_",
+         )
+         expected = Index(["name", "pop", "country", "states_name"]).sort_values()
+         assert result.columns.sort_values().equals(expected)
+
+     def test_normalize_with_multichar_separator(self):
+         # GH #43831
+         data = {"a": [1, 2], "b": {"b_1": 2, "b_2": (3, 4)}}
+         result = json_normalize(data, sep="__")
+         expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"])
+         tm.assert_frame_equal(result, expected)
+
+     def test_value_array_record_prefix(self):
+         # GH 21536
+         result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
+         expected = DataFrame([[1], [2]], columns=["Prefix.0"])
+         tm.assert_frame_equal(result, expected)
+
+     def test_nested_object_record_path(self):
+         # GH 22706
+         data = {
+             "state": "Florida",
+             "info": {
+                 "governor": "Rick Scott",
+                 "counties": [
+                     {"name": "Dade", "population": 12345},
+                     {"name": "Broward", "population": 40000},
+                     {"name": "Palm Beach", "population": 60000},
+                 ],
+             },
+         }
+         result = json_normalize(data, record_path=["info", "counties"])
+         expected = DataFrame(
+             [["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]],
+             columns=["name", "population"],
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_more_deeply_nested(self, deep_nested):
+         result = json_normalize(
+             deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
+         )
+         ex_data = {
+             "country": ["USA"] * 4 + ["Germany"] * 3,
+             "states.name": [
+                 "California",
+                 "California",
+                 "Ohio",
+                 "Ohio",
+                 "Bayern",
+                 "Nordrhein-Westfalen",
+                 "Nordrhein-Westfalen",
+             ],
+             "name": [
+                 "San Francisco",
+                 "Los Angeles",
+                 "Columbus",
+                 "Cleveland",
+                 "Munich",
+                 "Duesseldorf",
+                 "Koeln",
+             ],
+             "pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
+         }
+
+         expected = DataFrame(ex_data, columns=result.columns)
+         tm.assert_frame_equal(result, expected)
+
+     def test_shallow_nested(self):
+         data = [
+             {
+                 "state": "Florida",
+                 "shortname": "FL",
+                 "info": {"governor": "Rick Scott"},
+                 "counties": [
+                     {"name": "Dade", "population": 12345},
+                     {"name": "Broward", "population": 40000},
+                     {"name": "Palm Beach", "population": 60000},
+                 ],
+             },
+             {
+                 "state": "Ohio",
+                 "shortname": "OH",
+                 "info": {"governor": "John Kasich"},
+                 "counties": [
+                     {"name": "Summit", "population": 1234},
+                     {"name": "Cuyahoga", "population": 1337},
+                 ],
+             },
+         ]
+
+         result = json_normalize(
+             data, "counties", ["state", "shortname", ["info", "governor"]]
+         )
+         ex_data = {
+             "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
+             "state": ["Florida"] * 3 + ["Ohio"] * 2,
+             "shortname": ["FL", "FL", "FL", "OH", "OH"],
+             "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
+             "population": [12345, 40000, 60000, 1234, 1337],
+         }
+         expected = DataFrame(ex_data, columns=result.columns)
+         tm.assert_frame_equal(result, expected)
+
+     def test_nested_meta_path_with_nested_record_path(self, state_data):
+         # GH 27220
+         result = json_normalize(
+             data=state_data,
+             record_path=["counties"],
+             meta=["state", "shortname", ["info", "governor"]],
+             errors="ignore",
+         )
+
+         ex_data = {
+             "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
+             "population": [12345, 40000, 60000, 1234, 1337],
+             "state": ["Florida"] * 3 + ["Ohio"] * 2,
+             "shortname": ["FL"] * 3 + ["OH"] * 2,
+             "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
+         }
+
+         expected = DataFrame(ex_data)
+         tm.assert_frame_equal(result, expected)
+
+     def test_meta_name_conflict(self):
+         data = [
+             {
+                 "foo": "hello",
+                 "bar": "there",
+                 "data": [
+                     {"foo": "something", "bar": "else"},
+                     {"foo": "something2", "bar": "else2"},
+                 ],
+             }
+         ]
+
+         msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
+         with pytest.raises(ValueError, match=msg):
+             json_normalize(data, "data", meta=["foo", "bar"])
+
+         result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
+
+         for val in ["metafoo", "metabar", "foo", "bar"]:
+             assert val in result
+
+     def test_meta_parameter_not_modified(self):
+         # GH 18610
+         data = [
+             {
+                 "foo": "hello",
+                 "bar": "there",
+                 "data": [
+                     {"foo": "something", "bar": "else"},
+                     {"foo": "something2", "bar": "else2"},
+                 ],
+             }
+         ]
+
+         COLUMNS = ["foo", "bar"]
+         result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
+
+         assert COLUMNS == ["foo", "bar"]
+         for val in ["metafoo", "metabar", "foo", "bar"]:
+             assert val in result
+
+     def test_record_prefix(self, state_data):
+         result = json_normalize(state_data[0], "counties")
+         expected = DataFrame(state_data[0]["counties"])
+         tm.assert_frame_equal(result, expected)
+
+         result = json_normalize(
+             state_data, "counties", meta="state", record_prefix="county_"
+         )
+
+         expected = []
+         for rec in state_data:
+             expected.extend(rec["counties"])
+         expected = DataFrame(expected)
+         expected = expected.rename(columns=lambda x: "county_" + x)
+         expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
+
+         tm.assert_frame_equal(result, expected)
+
+     def test_non_ascii_key(self):
+         testjson = (
+             b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
+             b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
+         ).decode("utf8")
+
+         testdata = {
+             b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],
+             "sub.A": [1, 3],
+             "sub.B": [2, 4],
+         }
+         expected = DataFrame(testdata)
+
+         result = json_normalize(json.loads(testjson))
+         tm.assert_frame_equal(result, expected)
+
+     def test_missing_field(self, author_missing_data):
+         # GH20030:
+         result = json_normalize(author_missing_data)
+         ex_data = [
+             {
+                 "info": np.nan,
+                 "info.created_at": np.nan,
+                 "info.last_updated": np.nan,
+                 "author_name.first": np.nan,
+                 "author_name.last_name": np.nan,
+             },
+             {
+                 "info": None,
+                 "info.created_at": "11/08/1993",
+                 "info.last_updated": "26/05/2012",
+                 "author_name.first": "Jane",
+                 "author_name.last_name": "Doe",
+             },
+         ]
+         expected = DataFrame(ex_data)
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "max_level,expected",
+         [
+             (
+                 0,
+                 [
+                     {
+                         "TextField": "Some text",
+                         "UserField": {"Id": "ID001", "Name": "Name001"},
+                         "CreatedBy": {"Name": "User001"},
+                         "Image": {"a": "b"},
+                     },
+                     {
+                         "TextField": "Some text",
+                         "UserField": {"Id": "ID001", "Name": "Name001"},
+                         "CreatedBy": {"Name": "User001"},
+                         "Image": {"a": "b"},
+                     },
+                 ],
+             ),
+             (
+                 1,
+                 [
+                     {
+                         "TextField": "Some text",
+                         "UserField.Id": "ID001",
+                         "UserField.Name": "Name001",
+                         "CreatedBy": {"Name": "User001"},
+                         "Image": {"a": "b"},
+                     },
+                     {
+                         "TextField": "Some text",
+                         "UserField.Id": "ID001",
+                         "UserField.Name": "Name001",
+                         "CreatedBy": {"Name": "User001"},
+                         "Image": {"a": "b"},
+                     },
+                 ],
+             ),
+         ],
+     )
+     def test_max_level_with_records_path(self, max_level, expected):
+         # GH23843: Enhanced JSON normalize
+         test_input = [
+             {
+                 "CreatedBy": {"Name": "User001"},
+                 "Lookup": [
+                     {
+                         "TextField": "Some text",
+                         "UserField": {"Id": "ID001", "Name": "Name001"},
+                     },
+                     {
+                         "TextField": "Some text",
+                         "UserField": {"Id": "ID001", "Name": "Name001"},
+                     },
+                 ],
+                 "Image": {"a": "b"},
+                 "tags": [
+                     {"foo": "something", "bar": "else"},
+                     {"foo": "something2", "bar": "else2"},
+                 ],
+             }
+         ]
+
+         result = json_normalize(
+             test_input,
+             record_path=["Lookup"],
+             meta=[["CreatedBy"], ["Image"]],
+             max_level=max_level,
+         )
+         expected_df = DataFrame(data=expected, columns=result.columns.values)
+         tm.assert_equal(expected_df, result)
+
+     def test_nested_flattening_consistent(self):
+         # see gh-21537
+         df1 = json_normalize([{"A": {"B": 1}}])
+         df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")
+
+         # They should be the same.
+         tm.assert_frame_equal(df1, df2)
+
+     def test_nonetype_record_path(self, nulls_fixture):
+         # see gh-30148
+         # should not raise TypeError
+         result = json_normalize(
+             [
+                 {"state": "Texas", "info": nulls_fixture},
+                 {"state": "Florida", "info": [{"i": 2}]},
+             ],
+             record_path=["info"],
+         )
+         expected = DataFrame({"i": 2}, index=[0])
+         tm.assert_equal(result, expected)
+
+     @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"'])
+     def test_non_list_record_path_errors(self, value):
+         # see gh-30148, GH 26284
+         parsed_value = json.loads(value)
+         test_input = {"state": "Texas", "info": parsed_value}
+         test_path = "info"
+         msg = (
+             f"{test_input} has non list value {parsed_value} for path {test_path}. "
+             "Must be list or null."
+         )
+         with pytest.raises(TypeError, match=msg):
+             json_normalize([test_input], record_path=[test_path])
+
+     def test_meta_non_iterable(self):
+         # GH 31507
+         data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""
+
+         result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])
+         expected = DataFrame(
+             {"one": [1], "two": [2], "id": np.array([99], dtype=object)}
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_generator(self, state_data):
+         # GH35923 Fix pd.json_normalize to not skip the first element of a
+         # generator input
+         def generator_data():
+             yield from state_data[0]["counties"]
+
+         result = json_normalize(generator_data())
+         expected = DataFrame(state_data[0]["counties"])
+
+         tm.assert_frame_equal(result, expected)
+
+     def test_top_column_with_leading_underscore(self):
+         # 49861
+         data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4}
+         result = json_normalize(data, sep="_")
+         expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"])
+
+         tm.assert_frame_equal(result, expected)
+
+
+ class TestNestedToRecord:
+     def test_flat_stays_flat(self):
+         recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}]
+         result = nested_to_record(recs)
+         expected = recs
+         assert result == expected
+
+     def test_one_level_deep_flattens(self):
+         data = {"flat1": 1, "dict1": {"c": 1, "d": 2}}
+
+         result = nested_to_record(data)
+         expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}
+
+         assert result == expected
+
+     def test_nested_flattens(self):
+         data = {
+             "flat1": 1,
+             "dict1": {"c": 1, "d": 2},
+             "nested": {"e": {"c": 1, "d": 2}, "d": 2},
+         }
+
+         result = nested_to_record(data)
+         expected = {
+             "dict1.c": 1,
+             "dict1.d": 2,
+             "flat1": 1,
+             "nested.d": 2,
+             "nested.e.c": 1,
+             "nested.e.d": 2,
+         }
+
+         assert result == expected
+
+     def test_json_normalize_errors(self, missing_metadata):
+         # GH14583:
+         # If meta keys are not always present a new option to set
+         # errors='ignore' has been implemented
+
+         msg = (
+             "Key 'name' not found. To replace missing values of "
+             "'name' with np.nan, pass in errors='ignore'"
+         )
+         with pytest.raises(KeyError, match=msg):
+             json_normalize(
+                 data=missing_metadata,
+                 record_path="addresses",
+                 meta="name",
+                 errors="raise",
+             )
+
+     def test_missing_meta(self, missing_metadata):
+         # GH25468
+         # If metadata is nullable with errors set to ignore, the null values
+         # should be numpy.nan values
+         result = json_normalize(
+             data=missing_metadata, record_path="addresses", meta="name", errors="ignore"
+         )
+         ex_data = [
+             [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],
+             [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],
+         ]
+         columns = ["number", "street", "city", "state", "zip", "name"]
+         expected = DataFrame(ex_data, columns=columns)
+         tm.assert_frame_equal(result, expected)
+
+     def test_missing_nested_meta(self):
+         # GH44312
+         # If errors="ignore" and nested metadata is null, we should return nan
+         data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]}
+         result = json_normalize(
+             data,
+             record_path="value",
+             meta=["meta", ["nested_meta", "leaf"]],
+             errors="ignore",
+         )
+         ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]]
+         columns = ["rec", "meta", "nested_meta.leaf"]
+         expected = DataFrame(ex_data, columns=columns).astype(
+             {"nested_meta.leaf": object}
+         )
+         tm.assert_frame_equal(result, expected)
+
+         # If errors="raise" and nested metadata is null, we should raise with the
+         # key of the first missing level
+         with pytest.raises(KeyError, match="'leaf' not found"):
+             json_normalize(
+                 data,
+                 record_path="value",
+                 meta=["meta", ["nested_meta", "leaf"]],
+                 errors="raise",
+             )
+
+     def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata):
+         # GH41876
+         # Ensure errors='raise' works as intended even when a record_path of length
+         # greater than one is passed in
+         msg = (
+             "Key 'name' not found. To replace missing values of "
+             "'name' with np.nan, pass in errors='ignore'"
+         )
+         with pytest.raises(KeyError, match=msg):
+             json_normalize(
+                 data=missing_metadata,
+                 record_path=["previous_residences", "cities"],
+                 meta="name",
+                 errors="raise",
+             )
+
+     def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata):
+         # GH41876
+         # Ensure errors='ignore' works as intended even when a record_path of length
+         # greater than one is passed in
+         result = json_normalize(
+             data=missing_metadata,
+             record_path=["previous_residences", "cities"],
+             meta="name",
+             errors="ignore",
+         )
+         ex_data = [
+             ["Foo York City", "Alice"],
+             ["Barmingham", np.nan],
+         ]
+         columns = ["city_name", "name"]
+         expected = DataFrame(ex_data, columns=columns)
+         tm.assert_frame_equal(result, expected)
+
+     def test_donot_drop_nonevalues(self):
+         # GH21356
+         data = [
+             {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}},
+             {
+                 "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
+                 "author_name": {"first": "Jane", "last_name": "Doe"},
+             },
+         ]
+         result = nested_to_record(data)
+         expected = [
+             {
+                 "info": None,
+                 "author_name.first": "Smith",
+                 "author_name.last_name": "Appleseed",
+             },
+             {
+                 "author_name.first": "Jane",
+                 "author_name.last_name": "Doe",
+                 "info.created_at": "11/08/1993",
+                 "info.last_updated": "26/05/2012",
+             },
+         ]
+
+         assert result == expected
+
+     def test_nonetype_top_level_bottom_level(self):
+         # GH21158: If inner level json has a key with a null value
+         # make sure it does not do a new_d.pop twice and except
+         data = {
+             "id": None,
+             "location": {
+                 "country": {
+                     "state": {
+                         "id": None,
+                         "town.info": {
+                             "id": None,
+                             "region": None,
+                             "x": 49.151580810546875,
+                             "y": -33.148521423339844,
+                             "z": 27.572303771972656,
+                         },
+                     }
+                 }
+             },
+         }
+         result = nested_to_record(data)
+         expected = {
+             "id": None,
+             "location.country.state.id": None,
+             "location.country.state.town.info.id": None,
+             "location.country.state.town.info.region": None,
+             "location.country.state.town.info.x": 49.151580810546875,
+             "location.country.state.town.info.y": -33.148521423339844,
+             "location.country.state.town.info.z": 27.572303771972656,
+         }
+         assert result == expected
+
+     def test_nonetype_multiple_levels(self):
+         # GH21158: If inner level json has a key with a null value
+         # make sure it does not do a new_d.pop twice and except
+         data = {
+             "id": None,
+             "location": {
+                 "id": None,
+                 "country": {
+                     "id": None,
+                     "state": {
+                         "id": None,
+                         "town.info": {
+                             "region": None,
+                             "x": 49.151580810546875,
+                             "y": -33.148521423339844,
+                             "z": 27.572303771972656,
+                         },
+                     },
+                 },
+             },
+         }
+         result = nested_to_record(data)
+         expected = {
+             "id": None,
+             "location.id": None,
+             "location.country.id": None,
+             "location.country.state.id": None,
+             "location.country.state.town.info.region": None,
+             "location.country.state.town.info.x": 49.151580810546875,
+             "location.country.state.town.info.y": -33.148521423339844,
+             "location.country.state.town.info.z": 27.572303771972656,
+         }
+         assert result == expected
+
+     @pytest.mark.parametrize(
+         "max_level, expected",
+         [
+             (
+                 None,
+                 [
+                     {
+                         "CreatedBy.Name": "User001",
+                         "Lookup.TextField": "Some text",
+                         "Lookup.UserField.Id": "ID001",
+                         "Lookup.UserField.Name": "Name001",
+                         "Image.a": "b",
+                     }
+                 ],
+             ),
+             (
+                 0,
+                 [
+                     {
+                         "CreatedBy": {"Name": "User001"},
+                         "Lookup": {
+                             "TextField": "Some text",
+                             "UserField": {"Id": "ID001", "Name": "Name001"},
+                         },
+                         "Image": {"a": "b"},
+                     }
+                 ],
+             ),
+             (
+                 1,
+                 [
+                     {
+                         "CreatedBy.Name": "User001",
+                         "Lookup.TextField": "Some text",
+                         "Lookup.UserField": {"Id": "ID001", "Name": "Name001"},
+                         "Image.a": "b",
+                     }
+                 ],
+             ),
+         ],
+     )
+     def test_with_max_level(self, max_level, expected, max_level_test_input_data):
+         # GH23843: Enhanced JSON normalize
+         output = nested_to_record(max_level_test_input_data, max_level=max_level)
+         assert output == expected
+
+     def test_with_large_max_level(self):
+         # GH23843: Enhanced JSON normalize
+         max_level = 100
+         input_data = [
+             {
+                 "CreatedBy": {
+                     "user": {
+                         "name": {"firstname": "Leo", "LastName": "Thomson"},
+                         "family_tree": {
+                             "father": {
+                                 "name": "Father001",
+                                 "father": {
+                                     "Name": "Father002",
+                                     "father": {
+                                         "name": "Father003",
+                                         "father": {"Name": "Father004"},
+                                     },
+                                 },
+                             }
+                         },
+                     }
+                 }
+             }
+         ]
+         expected = [
+             {
+                 "CreatedBy.user.name.firstname": "Leo",
+                 "CreatedBy.user.name.LastName": "Thomson",
+                 "CreatedBy.user.family_tree.father.name": "Father001",
+                 "CreatedBy.user.family_tree.father.father.Name": "Father002",
+                 "CreatedBy.user.family_tree.father.father.father.name": "Father003",
+                 "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004",  # noqa: E501
+             }
+         ]
+         output = nested_to_record(input_data, max_level=max_level)
+         assert output == expected
+
+     def test_series_non_zero_index(self):
+         # GH 19020
+         data = {
+             0: {"id": 1, "name": "Foo", "elements": {"a": 1}},
+             1: {"id": 2, "name": "Bar", "elements": {"b": 2}},
+             2: {"id": 3, "name": "Baz", "elements": {"c": 3}},
+         }
+         s = Series(data)
+         s.index = [1, 2, 3]
+         result = json_normalize(s)
+         expected = DataFrame(
+             {
+                 "id": [1, 2, 3],
+                 "name": ["Foo", "Bar", "Baz"],
+                 "elements.a": [1.0, np.nan, np.nan],
+                 "elements.b": [np.nan, 2.0, np.nan],
+                 "elements.c": [np.nan, np.nan, 3.0],
+             }
+         )
+         tm.assert_frame_equal(result, expected)
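
As a quick aside (not part of the commit): a minimal sketch of the json_normalize API these tests exercise, using data shaped like the state_data fixture above. record_path picks the nested list to explode into rows; meta pulls parent fields alongside.

    import pandas as pd

    data = [
        {
            "state": "Florida",
            "info": {"governor": "Rick Scott"},
            "counties": [
                {"name": "Dade", "population": 12345},
                {"name": "Broward", "population": 40000},
            ],
        }
    ]
    # One row per county, carrying the parent "state" and nested "info.governor".
    flat = pd.json_normalize(
        data, record_path="counties", meta=["state", ["info", "governor"]]
    )
    # Columns: name, population, state, info.governor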
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py ADDED
@@ -0,0 +1,2202 @@
1
+ import datetime
2
+ from datetime import timedelta
3
+ from decimal import Decimal
4
+ from io import (
5
+ BytesIO,
6
+ StringIO,
7
+ )
8
+ import json
9
+ import os
10
+ import sys
11
+ import time
12
+
13
+ import numpy as np
14
+ import pytest
15
+
16
+ from pandas._config import using_pyarrow_string_dtype
17
+
18
+ from pandas.compat import IS64
19
+ import pandas.util._test_decorators as td
20
+
21
+ import pandas as pd
22
+ from pandas import (
23
+ NA,
24
+ DataFrame,
25
+ DatetimeIndex,
26
+ Index,
27
+ RangeIndex,
28
+ Series,
29
+ Timestamp,
30
+ date_range,
31
+ read_json,
32
+ )
33
+ import pandas._testing as tm
34
+ from pandas.core.arrays import (
35
+ ArrowStringArray,
36
+ StringArray,
37
+ )
38
+ from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics
39
+
40
+ from pandas.io.json import ujson_dumps
41
+
42
+
43
+ def test_literal_json_deprecation():
44
+ # PR 53409
45
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
46
+
47
+ jsonl = """{"a": 1, "b": 2}
48
+ {"a": 3, "b": 4}
49
+ {"a": 5, "b": 6}
50
+ {"a": 7, "b": 8}"""
51
+
52
+ msg = (
53
+ "Passing literal json to 'read_json' is deprecated and "
54
+ "will be removed in a future version. To read from a "
55
+ "literal string, wrap it in a 'StringIO' object."
56
+ )
57
+
58
+ with tm.assert_produces_warning(FutureWarning, match=msg):
59
+ try:
60
+ read_json(jsonl, lines=False)
61
+ except ValueError:
62
+ pass
63
+
64
+ with tm.assert_produces_warning(FutureWarning, match=msg):
65
+ read_json(expected.to_json(), lines=False)
66
+
67
+ with tm.assert_produces_warning(FutureWarning, match=msg):
68
+ result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
69
+ tm.assert_frame_equal(result, expected)
70
+
71
+ with tm.assert_produces_warning(FutureWarning, match=msg):
72
+ try:
73
+ result = read_json(
74
+ '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n',
75
+ lines=False,
76
+ )
77
+ except ValueError:
78
+ pass
79
+
80
+ with tm.assert_produces_warning(FutureWarning, match=msg):
81
+ try:
82
+ result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=False)
83
+ except ValueError:
84
+ pass
85
+ tm.assert_frame_equal(result, expected)
86
+
87
+
88
+ def assert_json_roundtrip_equal(result, expected, orient):
89
+ if orient in ("records", "values"):
90
+ expected = expected.reset_index(drop=True)
91
+ if orient == "values":
92
+ expected.columns = range(len(expected.columns))
93
+ tm.assert_frame_equal(result, expected)
94
+
95
+
96
+ class TestPandasContainer:
97
+ @pytest.fixture
98
+ def categorical_frame(self):
99
+ data = {
100
+ c: np.random.default_rng(i).standard_normal(30)
101
+ for i, c in enumerate(list("ABCD"))
102
+ }
103
+ cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * 15
104
+ data["E"] = list(reversed(cat))
105
+ data["sort"] = np.arange(30, dtype="int64")
106
+ return DataFrame(data, index=pd.CategoricalIndex(cat, name="E"))
107
+
108
+ @pytest.fixture
109
+ def datetime_series(self):
110
+ # Same as usual datetime_series, but with index freq set to None,
111
+ # since that doesn't round-trip, see GH#33711
112
+ ser = Series(
113
+ 1.1 * np.arange(10, dtype=np.float64),
114
+ index=date_range("2020-01-01", periods=10),
115
+ name="ts",
116
+ )
117
+ ser.index = ser.index._with_freq(None)
118
+ return ser
119
+
120
+ @pytest.fixture
121
+ def datetime_frame(self):
122
+ # Same as usual datetime_frame, but with index freq set to None,
123
+ # since that doesn't round-trip, see GH#33711
124
+ df = DataFrame(
125
+ np.random.default_rng(2).standard_normal((30, 4)),
126
+ columns=Index(list("ABCD"), dtype=object),
127
+ index=date_range("2000-01-01", periods=30, freq="B"),
128
+ )
129
+ df.index = df.index._with_freq(None)
130
+ return df
131
+
132
+ def test_frame_double_encoded_labels(self, orient):
133
+ df = DataFrame(
134
+ [["a", "b"], ["c", "d"]],
135
+ index=['index " 1', "index / 2"],
136
+ columns=["a \\ b", "y / z"],
137
+ )
138
+
139
+ data = StringIO(df.to_json(orient=orient))
140
+ result = read_json(data, orient=orient)
141
+ expected = df.copy()
142
+ assert_json_roundtrip_equal(result, expected, orient)
143
+
144
+ @pytest.mark.parametrize("orient", ["split", "records", "values"])
145
+ def test_frame_non_unique_index(self, orient):
146
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
147
+ data = StringIO(df.to_json(orient=orient))
148
+ result = read_json(data, orient=orient)
149
+ expected = df.copy()
150
+
151
+ assert_json_roundtrip_equal(result, expected, orient)
152
+
153
+ @pytest.mark.parametrize("orient", ["index", "columns"])
154
+ def test_frame_non_unique_index_raises(self, orient):
155
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
156
+ msg = f"DataFrame index must be unique for orient='{orient}'"
157
+ with pytest.raises(ValueError, match=msg):
158
+ df.to_json(orient=orient)
159
+
160
+ @pytest.mark.parametrize("orient", ["split", "values"])
161
+ @pytest.mark.parametrize(
162
+ "data",
163
+ [
164
+ [["a", "b"], ["c", "d"]],
165
+ [[1.5, 2.5], [3.5, 4.5]],
166
+ [[1, 2.5], [3, 4.5]],
167
+ [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
168
+ ],
169
+ )
170
+ def test_frame_non_unique_columns(self, orient, data):
171
+ df = DataFrame(data, index=[1, 2], columns=["x", "x"])
172
+
173
+ result = read_json(
174
+ StringIO(df.to_json(orient=orient)), orient=orient, convert_dates=["x"]
175
+ )
176
+ if orient == "values":
177
+ expected = DataFrame(data)
178
+ if expected.iloc[:, 0].dtype == "datetime64[ns]":
179
+ # orient == "values" by default will write Timestamp objects out
180
+ # in milliseconds; these are internally stored in nanosecond,
181
+ # so divide to get where we need
182
+ # TODO: a to_epoch method would also solve; see GH 14772
183
+ expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000)
184
+ elif orient == "split":
185
+ expected = df
186
+ expected.columns = ["x", "x.1"]
187
+
188
+ tm.assert_frame_equal(result, expected)
189
+
190
+ @pytest.mark.parametrize("orient", ["index", "columns", "records"])
191
+ def test_frame_non_unique_columns_raises(self, orient):
192
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
193
+
194
+ msg = f"DataFrame columns must be unique for orient='{orient}'"
195
+ with pytest.raises(ValueError, match=msg):
196
+ df.to_json(orient=orient)
197
+
198
+ def test_frame_default_orient(self, float_frame):
199
+ assert float_frame.to_json() == float_frame.to_json(orient="columns")
200
+
201
+ @pytest.mark.parametrize("dtype", [False, float])
202
+ @pytest.mark.parametrize("convert_axes", [True, False])
203
+ def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):
204
+ data = StringIO(float_frame.to_json(orient=orient))
205
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
206
+
207
+ expected = float_frame
208
+
209
+ assert_json_roundtrip_equal(result, expected, orient)
210
+
211
+ @pytest.mark.parametrize("dtype", [False, np.int64])
212
+ @pytest.mark.parametrize("convert_axes", [True, False])
213
+ def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame):
214
+ data = StringIO(int_frame.to_json(orient=orient))
215
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
216
+ expected = int_frame
217
+ assert_json_roundtrip_equal(result, expected, orient)
218
+
219
+ @pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])
220
+ @pytest.mark.parametrize("convert_axes", [True, False])
221
+ def test_roundtrip_str_axes(self, orient, convert_axes, dtype):
222
+ df = DataFrame(
223
+ np.zeros((200, 4)),
224
+ columns=[str(i) for i in range(4)],
225
+ index=[str(i) for i in range(200)],
226
+ dtype=dtype,
227
+ )
228
+
229
+ data = StringIO(df.to_json(orient=orient))
230
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
231
+
232
+ expected = df.copy()
233
+ if not dtype:
234
+ expected = expected.astype(np.int64)
235
+
236
+ # index columns, and records orients cannot fully preserve the string
237
+ # dtype for axes as the index and column labels are used as keys in
238
+ # JSON objects. JSON keys are by definition strings, so there's no way
239
+ # to disambiguate whether those keys actually were strings or numeric
240
+ # beforehand and numeric wins out.
241
+ if convert_axes and (orient in ("index", "columns")):
242
+ expected.columns = expected.columns.astype(np.int64)
243
+ expected.index = expected.index.astype(np.int64)
244
+ elif orient == "records" and convert_axes:
245
+ expected.columns = expected.columns.astype(np.int64)
246
+ elif convert_axes and orient == "split":
247
+ expected.columns = expected.columns.astype(np.int64)
248
+
249
+ assert_json_roundtrip_equal(result, expected, orient)
250
+
251
+ @pytest.mark.parametrize("convert_axes", [True, False])
252
+ def test_roundtrip_categorical(
253
+ self, request, orient, categorical_frame, convert_axes, using_infer_string
254
+ ):
255
+ # TODO: create a better frame to test with and improve coverage
256
+ if orient in ("index", "columns"):
257
+ request.applymarker(
258
+ pytest.mark.xfail(
259
+ reason=f"Can't have duplicate index values for orient '{orient}')"
260
+ )
261
+ )
262
+
263
+ data = StringIO(categorical_frame.to_json(orient=orient))
264
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
265
+
266
+ expected = categorical_frame.copy()
267
+ expected.index = expected.index.astype(
268
+ str if not using_infer_string else "string[pyarrow_numpy]"
269
+ ) # Categorical not preserved
270
+ expected.index.name = None # index names aren't preserved in JSON
271
+ assert_json_roundtrip_equal(result, expected, orient)
272
+
273
+ @pytest.mark.parametrize("convert_axes", [True, False])
274
+ def test_roundtrip_empty(self, orient, convert_axes):
275
+ empty_frame = DataFrame()
276
+ data = StringIO(empty_frame.to_json(orient=orient))
277
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
278
+ if orient == "split":
279
+ idx = Index([], dtype=(float if convert_axes else object))
280
+ expected = DataFrame(index=idx, columns=idx)
281
+ elif orient in ["index", "columns"]:
282
+ expected = DataFrame()
283
+ else:
284
+ expected = empty_frame.copy()
285
+
286
+ tm.assert_frame_equal(result, expected)
287
+
288
+ @pytest.mark.parametrize("convert_axes", [True, False])
289
+ def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):
290
+ # TODO: improve coverage with date_format parameter
291
+ data = StringIO(datetime_frame.to_json(orient=orient))
292
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
293
+ expected = datetime_frame.copy()
294
+
295
+ if not convert_axes: # one off for ts handling
296
+ # DTI gets converted to epoch values
297
+ idx = expected.index.view(np.int64) // 1000000
298
+ if orient != "split": # TODO: handle consistently across orients
299
+ idx = idx.astype(str)
300
+
301
+ expected.index = idx
302
+
303
+ assert_json_roundtrip_equal(result, expected, orient)
304
+
305
+ @pytest.mark.parametrize("convert_axes", [True, False])
306
+ def test_roundtrip_mixed(self, orient, convert_axes):
307
+ index = Index(["a", "b", "c", "d", "e"])
308
+ values = {
309
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
310
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
311
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
312
+ "D": [True, False, True, False, True],
313
+ }
314
+
315
+ df = DataFrame(data=values, index=index)
316
+
317
+ data = StringIO(df.to_json(orient=orient))
318
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
319
+
320
+ expected = df.copy()
321
+ expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
322
+
323
+ assert_json_roundtrip_equal(result, expected, orient)
324
+
325
+ @pytest.mark.xfail(
326
+ reason="#50456 Column multiindex is stored and loaded differently",
327
+ raises=AssertionError,
328
+ )
329
+ @pytest.mark.parametrize(
330
+ "columns",
331
+ [
332
+ [["2022", "2022"], ["JAN", "FEB"]],
333
+ [["2022", "2023"], ["JAN", "JAN"]],
334
+ [["2022", "2022"], ["JAN", "JAN"]],
335
+ ],
336
+ )
337
+ def test_roundtrip_multiindex(self, columns):
338
+ df = DataFrame(
339
+ [[1, 2], [3, 4]],
340
+ columns=pd.MultiIndex.from_arrays(columns),
341
+ )
342
+ data = StringIO(df.to_json(orient="split"))
343
+ result = read_json(data, orient="split")
344
+ tm.assert_frame_equal(result, df)
345
+
346
+ @pytest.mark.parametrize(
347
+ "data,msg,orient",
348
+ [
349
+ ('{"key":b:a:d}', "Expected object or value", "columns"),
350
+ # too few indices
351
+ (
352
+ '{"columns":["A","B"],'
353
+ '"index":["2","3"],'
354
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
355
+ "|".join(
356
+ [
357
+ r"Length of values \(3\) does not match length of index \(2\)",
358
+ ]
359
+ ),
360
+ "split",
361
+ ),
362
+ # too many columns
363
+ (
364
+ '{"columns":["A","B","C"],'
365
+ '"index":["1","2","3"],'
366
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
367
+ "3 columns passed, passed data had 2 columns",
368
+ "split",
369
+ ),
370
+ # bad key
371
+ (
372
+ '{"badkey":["A","B"],'
373
+ '"index":["2","3"],'
374
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
375
+ r"unexpected key\(s\): badkey",
376
+ "split",
377
+ ),
378
+ ],
379
+ )
380
+ def test_frame_from_json_bad_data_raises(self, data, msg, orient):
381
+ with pytest.raises(ValueError, match=msg):
382
+ read_json(StringIO(data), orient=orient)
383
+
384
+ @pytest.mark.parametrize("dtype", [True, False])
385
+ @pytest.mark.parametrize("convert_axes", [True, False])
386
+ def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
387
+ num_df = DataFrame([[1, 2], [4, 5, 6]])
388
+
389
+ result = read_json(
390
+ StringIO(num_df.to_json(orient=orient)),
391
+ orient=orient,
392
+ convert_axes=convert_axes,
393
+ dtype=dtype,
394
+ )
395
+ assert np.isnan(result.iloc[0, 2])
396
+
397
+ obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
398
+ result = read_json(
399
+ StringIO(obj_df.to_json(orient=orient)),
400
+ orient=orient,
401
+ convert_axes=convert_axes,
402
+ dtype=dtype,
403
+ )
404
+ assert np.isnan(result.iloc[0, 2])
405
+
406
+ @pytest.mark.parametrize("dtype", [True, False])
407
+ def test_frame_read_json_dtype_missing_value(self, dtype):
408
+ # GH28501 Parse missing values using read_json with dtype=False
409
+ # to NaN instead of None
410
+ result = read_json(StringIO("[null]"), dtype=dtype)
411
+ expected = DataFrame([np.nan])
412
+
413
+ tm.assert_frame_equal(result, expected)
414
+
415
+ @pytest.mark.parametrize("inf", [np.inf, -np.inf])
416
+ @pytest.mark.parametrize("dtype", [True, False])
417
+ def test_frame_infinity(self, inf, dtype):
418
+ # infinities get mapped to nulls which get mapped to NaNs during
419
+ # deserialisation
420
+ df = DataFrame([[1, 2], [4, 5, 6]])
421
+ df.loc[0, 2] = inf
422
+
423
+ data = StringIO(df.to_json())
424
+ result = read_json(data, dtype=dtype)
425
+ assert np.isnan(result.iloc[0, 2])
426
+
427
+ @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
428
+ @pytest.mark.parametrize(
429
+ "value,precision,expected_val",
430
+ [
431
+ (0.95, 1, 1.0),
432
+ (1.95, 1, 2.0),
433
+ (-1.95, 1, -2.0),
434
+ (0.995, 2, 1.0),
435
+ (0.9995, 3, 1.0),
436
+ (0.99999999999999944, 15, 1.0),
437
+ ],
438
+ )
439
+ def test_frame_to_json_float_precision(self, value, precision, expected_val):
440
+ df = DataFrame([{"a_float": value}])
441
+ encoded = df.to_json(double_precision=precision)
442
+ assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
443
+
444
+ def test_frame_to_json_except(self):
445
+ df = DataFrame([1, 2, 3])
446
+ msg = "Invalid value 'garbage' for option 'orient'"
447
+ with pytest.raises(ValueError, match=msg):
448
+ df.to_json(orient="garbage")
449
+
450
+ def test_frame_empty(self):
451
+ df = DataFrame(columns=["jim", "joe"])
452
+ assert not df._is_mixed_type
453
+
454
+ data = StringIO(df.to_json())
455
+ result = read_json(data, dtype=dict(df.dtypes))
456
+ tm.assert_frame_equal(result, df, check_index_type=False)
457
+
458
+ def test_frame_empty_to_json(self):
459
+ # GH 7445
460
+ df = DataFrame({"test": []}, index=[])
461
+ result = df.to_json(orient="columns")
462
+ expected = '{"test":{}}'
463
+ assert result == expected
464
+
465
+ def test_frame_empty_mixedtype(self):
466
+ # mixed type
467
+ df = DataFrame(columns=["jim", "joe"])
468
+ df["joe"] = df["joe"].astype("i8")
469
+ assert df._is_mixed_type
470
+ data = df.to_json()
471
+ tm.assert_frame_equal(
472
+ read_json(StringIO(data), dtype=dict(df.dtypes)),
473
+ df,
474
+ check_index_type=False,
475
+ )
476
+
477
+ def test_frame_mixedtype_orient(self): # GH10289
478
+ vals = [
479
+ [10, 1, "foo", 0.1, 0.01],
480
+ [20, 2, "bar", 0.2, 0.02],
481
+ [30, 3, "baz", 0.3, 0.03],
482
+ [40, 4, "qux", 0.4, 0.04],
483
+ ]
484
+
485
+ df = DataFrame(
486
+ vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
487
+ )
488
+
489
+ assert df._is_mixed_type
490
+ right = df.copy()
491
+
492
+ for orient in ["split", "index", "columns"]:
493
+ inp = StringIO(df.to_json(orient=orient))
494
+ left = read_json(inp, orient=orient, convert_axes=False)
495
+ tm.assert_frame_equal(left, right)
496
+
497
+ right.index = RangeIndex(len(df))
498
+ inp = StringIO(df.to_json(orient="records"))
499
+ left = read_json(inp, orient="records", convert_axes=False)
500
+ tm.assert_frame_equal(left, right)
501
+
502
+ right.columns = RangeIndex(df.shape[1])
503
+ inp = StringIO(df.to_json(orient="values"))
504
+ left = read_json(inp, orient="values", convert_axes=False)
505
+ tm.assert_frame_equal(left, right)
506
+
507
+ def test_v12_compat(self, datapath):
508
+ dti = date_range("2000-01-03", "2000-01-07")
509
+ # freq doesn't roundtrip
510
+ dti = DatetimeIndex(np.asarray(dti), freq=None)
511
+ df = DataFrame(
512
+ [
513
+ [1.56808523, 0.65727391, 1.81021139, -0.17251653],
514
+ [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
515
+ [1.51493992, 0.11805825, 1.629455, -1.31506612],
516
+ [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
517
+ [0.05951614, -2.69652057, 1.28163262, 0.34703478],
518
+ ],
519
+ columns=["A", "B", "C", "D"],
520
+ index=dti,
521
+ )
522
+ df["date"] = Timestamp("19920106 18:21:32.12").as_unit("ns")
523
+ df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101")
524
+ df["modified"] = df["date"]
525
+ df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
526
+
527
+ dirpath = datapath("io", "json", "data")
528
+ v12_json = os.path.join(dirpath, "tsframe_v012.json")
529
+ df_unser = read_json(v12_json)
530
+ tm.assert_frame_equal(df, df_unser)
531
+
532
+ df_iso = df.drop(["modified"], axis=1)
533
+ v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
534
+ df_unser_iso = read_json(v12_iso_json)
535
+ tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False)
536
+
537
+ def test_blocks_compat_GH9037(self, using_infer_string):
538
+ index = date_range("20000101", periods=10, freq="h")
539
+ # freq doesn't round-trip
540
+ index = DatetimeIndex(list(index), freq=None)
541
+
542
+ df_mixed = DataFrame(
543
+ {
544
+ "float_1": [
545
+ -0.92077639,
546
+ 0.77434435,
547
+ 1.25234727,
548
+ 0.61485564,
549
+ -0.60316077,
550
+ 0.24653374,
551
+ 0.28668979,
552
+ -2.51969012,
553
+ 0.95748401,
554
+ -1.02970536,
555
+ ],
556
+ "int_1": [
557
+ 19680418,
558
+ 75337055,
559
+ 99973684,
560
+ 65103179,
561
+ 79373900,
562
+ 40314334,
563
+ 21290235,
564
+ 4991321,
565
+ 41903419,
566
+ 16008365,
567
+ ],
568
+ "str_1": [
569
+ "78c608f1",
570
+ "64a99743",
571
+ "13d2ff52",
572
+ "ca7f4af2",
573
+ "97236474",
574
+ "bde7e214",
575
+ "1a6bde47",
576
+ "b1190be5",
577
+ "7a669144",
578
+ "8d64d068",
579
+ ],
580
+ "float_2": [
581
+ -0.0428278,
582
+ -1.80872357,
583
+ 3.36042349,
584
+ -0.7573685,
585
+ -0.48217572,
586
+ 0.86229683,
587
+ 1.08935819,
588
+ 0.93898739,
589
+ -0.03030452,
590
+ 1.43366348,
591
+ ],
592
+ "str_2": [
593
+ "14f04af9",
594
+ "d085da90",
595
+ "4bcfac83",
596
+ "81504caf",
597
+ "2ffef4a9",
598
+ "08e2f5c4",
599
+ "07e1af03",
600
+ "addbd4a7",
601
+ "1f6a09ba",
602
+ "4bfc4d87",
603
+ ],
604
+ "int_2": [
605
+ 86967717,
606
+ 98098830,
607
+ 51927505,
608
+ 20372254,
609
+ 12601730,
610
+ 20884027,
611
+ 34193846,
612
+ 10561746,
613
+ 24867120,
614
+ 76131025,
615
+ ],
616
+ },
617
+ index=index,
618
+ )
619
+
620
+ # JSON deserialisation always creates unicode strings
621
+ df_mixed.columns = df_mixed.columns.astype(
622
+ np.str_ if not using_infer_string else "string[pyarrow_numpy]"
623
+ )
624
+ data = StringIO(df_mixed.to_json(orient="split"))
625
+ df_roundtrip = read_json(data, orient="split")
626
+ tm.assert_frame_equal(
627
+ df_mixed,
628
+ df_roundtrip,
629
+ check_index_type=True,
630
+ check_column_type=True,
631
+ by_blocks=True,
632
+ check_exact=True,
633
+ )
634
+
635
+ def test_frame_nonprintable_bytes(self):
636
+ # GH14256: failing column caused segfaults, if it is not the last one
637
+
638
+ class BinaryThing:
639
+ def __init__(self, hexed) -> None:
640
+ self.hexed = hexed
641
+ self.binary = bytes.fromhex(hexed)
642
+
643
+ def __str__(self) -> str:
644
+ return self.hexed
645
+
646
+ hexed = "574b4454ba8c5eb4f98a8f45"
647
+ binthing = BinaryThing(hexed)
648
+
649
+ # verify the proper conversion of printable content
650
+ df_printable = DataFrame({"A": [binthing.hexed]})
651
+ assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
652
+
653
+ # check if non-printable content throws appropriate Exception
654
+ df_nonprintable = DataFrame({"A": [binthing]})
655
+ msg = "Unsupported UTF-8 sequence length when encoding string"
656
+ with pytest.raises(OverflowError, match=msg):
657
+ df_nonprintable.to_json()
658
+
659
+ # the same with multiple columns threw segfaults
660
+ df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
661
+ with pytest.raises(OverflowError, match=msg):
662
+ df_mixed.to_json()
663
+
664
+ # default_handler should resolve exceptions for non-string types
665
+ result = df_nonprintable.to_json(default_handler=str)
666
+ expected = f'{{"A":{{"0":"{hexed}"}}}}'
667
+ assert result == expected
668
+ assert (
669
+ df_mixed.to_json(default_handler=str)
670
+ == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
671
+ )
672
+
673
+ def test_label_overflow(self):
674
+ # GH14256: buffer length not checked when writing label
675
+ result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
676
+ expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
677
+ assert result == expected
678
+
679
+ def test_series_non_unique_index(self):
680
+ s = Series(["a", "b"], index=[1, 1])
681
+
682
+ msg = "Series index must be unique for orient='index'"
683
+ with pytest.raises(ValueError, match=msg):
684
+ s.to_json(orient="index")
685
+
686
+ tm.assert_series_equal(
687
+ s,
688
+ read_json(
689
+ StringIO(s.to_json(orient="split")), orient="split", typ="series"
690
+ ),
691
+ )
692
+ unserialized = read_json(
693
+ StringIO(s.to_json(orient="records")), orient="records", typ="series"
694
+ )
695
+ tm.assert_equal(s.values, unserialized.values)
696
+
697
+ def test_series_default_orient(self, string_series):
698
+ assert string_series.to_json() == string_series.to_json(orient="index")
699
+
700
+ def test_series_roundtrip_simple(self, orient, string_series, using_infer_string):
701
+ data = StringIO(string_series.to_json(orient=orient))
702
+ result = read_json(data, typ="series", orient=orient)
703
+
704
+ expected = string_series
705
+ if using_infer_string and orient in ("split", "index", "columns"):
706
+ # These schemas don't contain dtypes, so we infer string
707
+ expected.index = expected.index.astype("string[pyarrow_numpy]")
708
+ if orient in ("values", "records"):
709
+ expected = expected.reset_index(drop=True)
710
+ if orient != "split":
711
+ expected.name = None
712
+
713
+ tm.assert_series_equal(result, expected)
714
+
715
+ @pytest.mark.parametrize("dtype", [False, None])
716
+ def test_series_roundtrip_object(self, orient, dtype, object_series):
717
+ data = StringIO(object_series.to_json(orient=orient))
718
+ result = read_json(data, typ="series", orient=orient, dtype=dtype)
719
+
720
+ expected = object_series
721
+ if orient in ("values", "records"):
722
+ expected = expected.reset_index(drop=True)
723
+ if orient != "split":
724
+ expected.name = None
725
+
726
+ tm.assert_series_equal(result, expected)
727
+
728
+ def test_series_roundtrip_empty(self, orient):
729
+ empty_series = Series([], index=[], dtype=np.float64)
730
+ data = StringIO(empty_series.to_json(orient=orient))
731
+ result = read_json(data, typ="series", orient=orient)
732
+
733
+ expected = empty_series.reset_index(drop=True)
734
+ if orient in ("split"):
735
+ expected.index = expected.index.astype(np.float64)
+
+         tm.assert_series_equal(result, expected)
+
+     def test_series_roundtrip_timeseries(self, orient, datetime_series):
+         data = StringIO(datetime_series.to_json(orient=orient))
+         result = read_json(data, typ="series", orient=orient)
+
+         expected = datetime_series
+         if orient in ("values", "records"):
+             expected = expected.reset_index(drop=True)
+         if orient != "split":
+             expected.name = None
+
+         tm.assert_series_equal(result, expected)
+
+     @pytest.mark.parametrize("dtype", [np.float64, int])
+     def test_series_roundtrip_numeric(self, orient, dtype):
+         s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
+         data = StringIO(s.to_json(orient=orient))
+         result = read_json(data, typ="series", orient=orient)
+
+         expected = s.copy()
+         if orient in ("values", "records"):
+             expected = expected.reset_index(drop=True)
+
+         tm.assert_series_equal(result, expected)
+
+     def test_series_to_json_except(self):
+         s = Series([1, 2, 3])
+         msg = "Invalid value 'garbage' for option 'orient'"
+         with pytest.raises(ValueError, match=msg):
+             s.to_json(orient="garbage")
+
+     def test_series_from_json_precise_float(self):
+         s = Series([4.56, 4.56, 4.56])
+         result = read_json(StringIO(s.to_json()), typ="series", precise_float=True)
+         tm.assert_series_equal(result, s, check_index_type=False)
+
+     def test_series_with_dtype(self):
+         # GH 21986
+         s = Series([4.56, 4.56, 4.56])
+         result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64)
+         expected = Series([4] * 3)
+         tm.assert_series_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "dtype,expected",
+         [
+             (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
+             (False, Series([946684800000])),
+         ],
+     )
+     def test_series_with_dtype_datetime(self, dtype, expected):
+         s = Series(["2000-01-01"], dtype="datetime64[ns]")
+         data = StringIO(s.to_json())
+         result = read_json(data, typ="series", dtype=dtype)
+         tm.assert_series_equal(result, expected)
+
+     def test_frame_from_json_precise_float(self):
+         df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
+         result = read_json(StringIO(df.to_json()), precise_float=True)
+         tm.assert_frame_equal(result, df)
+
+     def test_typ(self):
+         s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
+         result = read_json(StringIO(s.to_json()), typ=None)
+         tm.assert_series_equal(result, s)
+
+     def test_reconstruction_index(self):
+         df = DataFrame([[1, 2, 3], [4, 5, 6]])
+         result = read_json(StringIO(df.to_json()))
+         tm.assert_frame_equal(result, df)
+
+         df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
+         result = read_json(StringIO(df.to_json()))
+         tm.assert_frame_equal(result, df)
+
+     def test_path(self, float_frame, int_frame, datetime_frame):
+         with tm.ensure_clean("test.json") as path:
+             for df in [float_frame, int_frame, datetime_frame]:
+                 df.to_json(path)
+                 read_json(path)
+
+     def test_axis_dates(self, datetime_series, datetime_frame):
+         # frame
+         json = StringIO(datetime_frame.to_json())
+         result = read_json(json)
+         tm.assert_frame_equal(result, datetime_frame)
+
+         # series
+         json = StringIO(datetime_series.to_json())
+         result = read_json(json, typ="series")
+         tm.assert_series_equal(result, datetime_series, check_names=False)
+         assert result.name is None
+
+     def test_convert_dates(self, datetime_series, datetime_frame):
+         # frame
+         df = datetime_frame
+         df["date"] = Timestamp("20130101").as_unit("ns")
+
+         json = StringIO(df.to_json())
+         result = read_json(json)
+         tm.assert_frame_equal(result, df)
+
+         df["foo"] = 1.0
+         json = StringIO(df.to_json(date_unit="ns"))
+
+         result = read_json(json, convert_dates=False)
+         expected = df.copy()
+         expected["date"] = expected["date"].values.view("i8")
+         expected["foo"] = expected["foo"].astype("int64")
+         tm.assert_frame_equal(result, expected)
+
+         # series
+         ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index)
+         json = StringIO(ts.to_json())
+         result = read_json(json, typ="series")
+         tm.assert_series_equal(result, ts)
+
+ @pytest.mark.parametrize("date_format", ["epoch", "iso"])
856
+ @pytest.mark.parametrize("as_object", [True, False])
857
+ @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp])
858
+ def test_date_index_and_values(self, date_format, as_object, date_typ):
859
+ data = [date_typ(year=2020, month=1, day=1), pd.NaT]
860
+ if as_object:
861
+ data.append("a")
862
+
863
+ ser = Series(data, index=data)
864
+ result = ser.to_json(date_format=date_format)
865
+
866
+ if date_format == "epoch":
867
+ expected = '{"1577836800000":1577836800000,"null":null}'
868
+ else:
869
+ expected = (
870
+ '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}'
871
+ )
872
+
873
+ if as_object:
874
+ expected = expected.replace("}", ',"a":"a"}')
875
+
876
+ assert result == expected
877
+
878
+ @pytest.mark.parametrize(
879
+ "infer_word",
880
+ [
881
+ "trade_time",
882
+ "date",
883
+ "datetime",
884
+ "sold_at",
885
+ "modified",
886
+ "timestamp",
887
+ "timestamps",
888
+ ],
889
+ )
890
+ def test_convert_dates_infer(self, infer_word):
891
+ # GH10747
892
+
893
+ data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
894
+ expected = DataFrame(
895
+ [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
896
+ )
897
+
898
+ result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]]
899
+ tm.assert_frame_equal(result, expected)
900
+
901
+ @pytest.mark.parametrize(
902
+ "date,date_unit",
903
+ [
904
+ ("20130101 20:43:42.123", None),
905
+ ("20130101 20:43:42", "s"),
906
+ ("20130101 20:43:42.123", "ms"),
907
+ ("20130101 20:43:42.123456", "us"),
908
+ ("20130101 20:43:42.123456789", "ns"),
909
+ ],
910
+ )
911
+ def test_date_format_frame(self, date, date_unit, datetime_frame):
912
+ df = datetime_frame
913
+
914
+ df["date"] = Timestamp(date).as_unit("ns")
915
+ df.iloc[1, df.columns.get_loc("date")] = pd.NaT
916
+ df.iloc[5, df.columns.get_loc("date")] = pd.NaT
917
+ if date_unit:
918
+ json = df.to_json(date_format="iso", date_unit=date_unit)
919
+ else:
920
+ json = df.to_json(date_format="iso")
921
+
922
+ result = read_json(StringIO(json))
923
+ expected = df.copy()
924
+ tm.assert_frame_equal(result, expected)
925
+
926
+ def test_date_format_frame_raises(self, datetime_frame):
927
+ df = datetime_frame
928
+ msg = "Invalid value 'foo' for option 'date_unit'"
929
+ with pytest.raises(ValueError, match=msg):
930
+ df.to_json(date_format="iso", date_unit="foo")
931
+
932
+ @pytest.mark.parametrize(
933
+ "date,date_unit",
934
+ [
935
+ ("20130101 20:43:42.123", None),
936
+ ("20130101 20:43:42", "s"),
937
+ ("20130101 20:43:42.123", "ms"),
938
+ ("20130101 20:43:42.123456", "us"),
939
+ ("20130101 20:43:42.123456789", "ns"),
940
+ ],
941
+ )
942
+ def test_date_format_series(self, date, date_unit, datetime_series):
943
+ ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index)
944
+ ts.iloc[1] = pd.NaT
945
+ ts.iloc[5] = pd.NaT
946
+ if date_unit:
947
+ json = ts.to_json(date_format="iso", date_unit=date_unit)
948
+ else:
949
+ json = ts.to_json(date_format="iso")
950
+
951
+ result = read_json(StringIO(json), typ="series")
952
+ expected = ts.copy()
953
+ tm.assert_series_equal(result, expected)
954
+
955
+ def test_date_format_series_raises(self, datetime_series):
956
+ ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
957
+ msg = "Invalid value 'foo' for option 'date_unit'"
958
+ with pytest.raises(ValueError, match=msg):
959
+ ts.to_json(date_format="iso", date_unit="foo")
960
+
961
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
962
+ def test_date_unit(self, unit, datetime_frame):
963
+ df = datetime_frame
964
+ df["date"] = Timestamp("20130101 20:43:42").as_unit("ns")
965
+ dl = df.columns.get_loc("date")
966
+ df.iloc[1, dl] = Timestamp("19710101 20:43:42")
967
+ df.iloc[2, dl] = Timestamp("21460101 20:43:42")
968
+ df.iloc[4, dl] = pd.NaT
969
+
970
+ json = df.to_json(date_format="epoch", date_unit=unit)
971
+
972
+ # force date unit
973
+ result = read_json(StringIO(json), date_unit=unit)
974
+ tm.assert_frame_equal(result, df)
975
+
976
+ # detect date unit
977
+ result = read_json(StringIO(json), date_unit=None)
978
+ tm.assert_frame_equal(result, df)
979
+
980
+ @pytest.mark.parametrize("unit", ["s", "ms", "us"])
981
+ def test_iso_non_nano_datetimes(self, unit):
982
+ # Test that numpy datetimes
983
+ # in an Index or a column with non-nano resolution can be serialized
984
+ # correctly
985
+ # GH53686
986
+ index = DatetimeIndex(
987
+ [np.datetime64("2023-01-01T11:22:33.123456", unit)],
988
+ dtype=f"datetime64[{unit}]",
989
+ )
990
+ df = DataFrame(
991
+ {
992
+ "date": Series(
993
+ [np.datetime64("2022-01-01T11:22:33.123456", unit)],
994
+ dtype=f"datetime64[{unit}]",
995
+ index=index,
996
+ ),
997
+ "date_obj": Series(
998
+ [np.datetime64("2023-01-01T11:22:33.123456", unit)],
999
+ dtype=object,
1000
+ index=index,
1001
+ ),
1002
+ },
1003
+ )
1004
+
1005
+ buf = StringIO()
1006
+ df.to_json(buf, date_format="iso", date_unit=unit)
1007
+ buf.seek(0)
1008
+
1009
+ # read_json always reads datetimes in nanosecond resolution
1010
+ # TODO: check_dtype/check_index_type should be removable
1011
+ # once read_json gets non-nano support
1012
+ tm.assert_frame_equal(
1013
+ read_json(buf, convert_dates=["date", "date_obj"]),
1014
+ df,
1015
+ check_index_type=False,
1016
+ check_dtype=False,
1017
+ )
1018
+
1019
+ def test_weird_nested_json(self):
1020
+ # this used to core dump the parser
1021
+ s = r"""{
1022
+ "status": "success",
1023
+ "data": {
1024
+ "posts": [
1025
+ {
1026
+ "id": 1,
1027
+ "title": "A blog post",
1028
+ "body": "Some useful content"
1029
+ },
1030
+ {
1031
+ "id": 2,
1032
+ "title": "Another blog post",
1033
+ "body": "More content"
1034
+ }
1035
+ ]
1036
+ }
1037
+ }"""
1038
+ read_json(StringIO(s))
1039
+
1040
+     def test_doc_example(self):
+         dfj2 = DataFrame(
+             np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB")
+         )
+         dfj2["date"] = Timestamp("20130101")
+         dfj2["ints"] = range(5)
+         dfj2["bools"] = True
+         dfj2.index = date_range("20130101", periods=5)
+
+         json = StringIO(dfj2.to_json())
+         result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
+         tm.assert_frame_equal(result, result)
+
+     def test_round_trip_exception(self, datapath):
+         # GH 3867
+         path = datapath("io", "json", "data", "teams.csv")
+         df = pd.read_csv(path)
+         s = df.to_json()
+
+         result = read_json(StringIO(s))
+         res = result.reindex(index=df.index, columns=df.columns)
+         msg = "The 'downcast' keyword in fillna is deprecated"
+         with tm.assert_produces_warning(FutureWarning, match=msg):
+             res = res.fillna(np.nan, downcast=False)
+         tm.assert_frame_equal(res, df)
+
+     @pytest.mark.network
+     @pytest.mark.single_cpu
+     @pytest.mark.parametrize(
+         "field,dtype",
+         [
+             ["created_at", pd.DatetimeTZDtype(tz="UTC")],
+             ["closed_at", "datetime64[ns]"],
+             ["updated_at", pd.DatetimeTZDtype(tz="UTC")],
+         ],
+     )
+     def test_url(self, field, dtype, httpserver):
+         data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n'  # noqa: E501
+         httpserver.serve_content(content=data)
+         result = read_json(httpserver.url, convert_dates=True)
+         assert result[field].dtype == dtype
+
+     def test_timedelta(self):
+         converter = lambda x: pd.to_timedelta(x, unit="ms")
+
+         ser = Series([timedelta(23), timedelta(seconds=5)])
+         assert ser.dtype == "timedelta64[ns]"
+
+         result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
+         tm.assert_series_equal(result, ser)
+
+         ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1]))
+         assert ser.dtype == "timedelta64[ns]"
+         result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
+         tm.assert_series_equal(result, ser)
+
+         frame = DataFrame([timedelta(23), timedelta(seconds=5)])
+         assert frame[0].dtype == "timedelta64[ns]"
+         tm.assert_frame_equal(
+             frame, read_json(StringIO(frame.to_json())).apply(converter)
+         )
+
+     def test_timedelta2(self):
+         frame = DataFrame(
+             {
+                 "a": [timedelta(days=23), timedelta(seconds=5)],
+                 "b": [1, 2],
+                 "c": date_range(start="20130101", periods=2),
+             }
+         )
+         data = StringIO(frame.to_json(date_unit="ns"))
+         result = read_json(data)
+         result["a"] = pd.to_timedelta(result.a, unit="ns")
+         result["c"] = pd.to_datetime(result.c)
+         tm.assert_frame_equal(frame, result)
+
+     def test_mixed_timedelta_datetime(self):
+         td = timedelta(23)
+         ts = Timestamp("20130101")
+         frame = DataFrame({"a": [td, ts]}, dtype=object)
+
+         expected = DataFrame(
+             {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
+         )
+         data = StringIO(frame.to_json(date_unit="ns"))
+         result = read_json(data, dtype={"a": "int64"})
+         tm.assert_frame_equal(result, expected, check_index_type=False)
+
+     @pytest.mark.parametrize("as_object", [True, False])
+     @pytest.mark.parametrize("date_format", ["iso", "epoch"])
+     @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
+     def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
+         # GH28156: to_json not correctly formatting Timedelta
+         data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
+         if as_object:
+             data.append("a")
+
+         ser = Series(data, index=data)
+         if date_format == "iso":
+             expected = (
+                 '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
+             )
+         else:
+             expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
+
+         if as_object:
+             expected = expected.replace("}", ',"a":"a"}')
+
+         result = ser.to_json(date_format=date_format)
+         assert result == expected
+
+     @pytest.mark.parametrize("as_object", [True, False])
+     @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
+     def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ):
+         data = [timedelta_typ(milliseconds=42)]
+         ser = Series(data, index=data)
+         if as_object:
+             ser = ser.astype(object)
+
+         result = ser.to_json()
+         expected = '{"42":42}'
+         assert result == expected
+
+     def test_default_handler(self):
+         value = object()
+         frame = DataFrame({"a": [7, value]})
+         expected = DataFrame({"a": [7, str(value)]})
+         result = read_json(StringIO(frame.to_json(default_handler=str)))
+         tm.assert_frame_equal(expected, result, check_index_type=False)
+
+     def test_default_handler_indirect(self):
+         def default(obj):
+             if isinstance(obj, complex):
+                 return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
+             return str(obj)
+
+         df_list = [
+             9,
+             DataFrame(
+                 {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
+                 columns=["a", "b"],
+             ),
+         ]
+         expected = (
+             '[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
+             '["re",4.0],["im",-5.0]],"N\\/A"]]]'
+         )
+         assert (
+             ujson_dumps(df_list, default_handler=default, orient="values") == expected
+         )
+
+     def test_default_handler_numpy_unsupported_dtype(self):
+         # GH12554 to_json raises 'Unhandled numpy dtype 15'
+         df = DataFrame(
+             {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
+             columns=["a", "b"],
+         )
+         expected = (
+             '[["(1+0j)","(nan+0j)"],'
+             '["(2.3+0j)","(nan+0j)"],'
+             '["(4-5j)","(1.2+0j)"]]'
+         )
+         assert df.to_json(default_handler=str, orient="values") == expected
+
+     def test_default_handler_raises(self):
+         msg = "raisin"
+
+         def my_handler_raises(obj):
+             raise TypeError(msg)
+
+         with pytest.raises(TypeError, match=msg):
+             DataFrame({"a": [1, 2, object()]}).to_json(
+                 default_handler=my_handler_raises
+             )
+         with pytest.raises(TypeError, match=msg):
+             DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
+                 default_handler=my_handler_raises
+             )
+
+     def test_categorical(self):
+         # GH4377 df.to_json segfaults with non-ndarray blocks
+         df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
+         df["B"] = df["A"]
+         expected = df.to_json()
+
+         df["B"] = df["A"].astype("category")
+         assert expected == df.to_json()
+
+         s = df["A"]
+         sc = df["B"]
+         assert s.to_json() == sc.to_json()
+
+     def test_datetime_tz(self):
+         # GH4377 df.to_json segfaults with non-ndarray blocks
+         tz_range = date_range("20130101", periods=3, tz="US/Eastern")
+         tz_naive = tz_range.tz_convert("utc").tz_localize(None)
+
+         df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)})
+
+         df_naive = df.copy()
+         df_naive["A"] = tz_naive
+         expected = df_naive.to_json()
+         assert expected == df.to_json()
+
+         stz = Series(tz_range)
+         s_naive = Series(tz_naive)
+         assert stz.to_json() == s_naive.to_json()
+
+     def test_sparse(self):
+         # GH4377 df.to_json segfaults with non-ndarray blocks
+         df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+         df.loc[:8] = np.nan
+
+         sdf = df.astype("Sparse")
+         expected = df.to_json()
+         assert expected == sdf.to_json()
+
+         s = Series(np.random.default_rng(2).standard_normal(10))
+         s.loc[:8] = np.nan
+         ss = s.astype("Sparse")
+
+         expected = s.to_json()
+         assert expected == ss.to_json()
+
+     @pytest.mark.parametrize(
+         "ts",
+         [
+             Timestamp("2013-01-10 05:00:00Z"),
+             Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
+             Timestamp("2013-01-10 00:00:00-0500"),
+         ],
+     )
+     def test_tz_is_utc(self, ts):
+         exp = '"2013-01-10T05:00:00.000Z"'
+
+         assert ujson_dumps(ts, iso_dates=True) == exp
+         dt = ts.to_pydatetime()
+         assert ujson_dumps(dt, iso_dates=True) == exp
+
+     def test_tz_is_naive(self):
+         ts = Timestamp("2013-01-10 05:00:00")
+         exp = '"2013-01-10T05:00:00.000"'
+
+         assert ujson_dumps(ts, iso_dates=True) == exp
+         dt = ts.to_pydatetime()
+         assert ujson_dumps(dt, iso_dates=True) == exp
+
+     @pytest.mark.parametrize(
+         "tz_range",
+         [
+             date_range("2013-01-01 05:00:00Z", periods=2),
+             date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
+             date_range("2013-01-01 00:00:00-0500", periods=2),
+         ],
+     )
+     def test_tz_range_is_utc(self, tz_range):
+         exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
+         dfexp = (
+             '{"DT":{'
+             '"0":"2013-01-01T05:00:00.000Z",'
+             '"1":"2013-01-02T05:00:00.000Z"}}'
+         )
+
+         assert ujson_dumps(tz_range, iso_dates=True) == exp
+         dti = DatetimeIndex(tz_range)
+         # Ensure datetimes in object array are serialized correctly
+         # in addition to the normal DTI case
+         assert ujson_dumps(dti, iso_dates=True) == exp
+         assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
+         df = DataFrame({"DT": dti})
+         result = ujson_dumps(df, iso_dates=True)
+         assert result == dfexp
+         assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)
+
+     def test_tz_range_is_naive(self):
+         dti = date_range("2013-01-01 05:00:00", periods=2)
+
+         exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'
+         dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'
+
+         # Ensure datetimes in object array are serialized correctly
+         # in addition to the normal DTI case
+         assert ujson_dumps(dti, iso_dates=True) == exp
+         assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
+         df = DataFrame({"DT": dti})
+         result = ujson_dumps(df, iso_dates=True)
+         assert result == dfexp
+         assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)
+
+     def test_read_inline_jsonl(self):
+         # GH9180
+
+         result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
+         expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.single_cpu
+     @td.skip_if_not_us_locale
+     def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so):
+         # GH17200
+
+         result = read_json(
+             f"s3n://{s3_public_bucket_with_data.name}/items.jsonl",
+             lines=True,
+             storage_options=s3so,
+         )
+         expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+         tm.assert_frame_equal(result, expected)
+
+     def test_read_local_jsonl(self):
+         # GH17200
+         with tm.ensure_clean("tmp_items.json") as path:
+             with open(path, "w", encoding="utf-8") as infile:
+                 infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
+             result = read_json(path, lines=True)
+             expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+             tm.assert_frame_equal(result, expected)
+
+     def test_read_jsonl_unicode_chars(self):
+         # GH15132: non-ascii unicode characters
+         # \u201d == RIGHT DOUBLE QUOTATION MARK
+
+         # simulate file handle
+         json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+         json = StringIO(json)
+         result = read_json(json, lines=True)
+         expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+         tm.assert_frame_equal(result, expected)
+
+         # simulate string
+         json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n')
+         result = read_json(json, lines=True)
+         expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)])
+     def test_to_json_large_numbers(self, bigNum):
+         # GH34473
+         series = Series(bigNum, dtype=object, index=["articleId"])
+         json = series.to_json()
+         expected = '{"articleId":' + str(bigNum) + "}"
+         assert json == expected
+
+         df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0])
+         json = df.to_json()
+         expected = '{"0":{"articleId":' + str(bigNum) + "}}"
+         assert json == expected
+
+     @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64])
+     def test_read_json_large_numbers(self, bigNum):
+         # GH20599, 26068
+         json = StringIO('{"articleId":' + str(bigNum) + "}")
+         msg = r"Value is too small|Value is too big"
+         with pytest.raises(ValueError, match=msg):
+             read_json(json)
+
+         json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}")
+         with pytest.raises(ValueError, match=msg):
+             read_json(json)
+
+     def test_read_json_large_numbers2(self):
+         # GH18842
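+         # the quoted big ints exceed int64, so read_json coerces them to float64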
+ json = '{"articleId": "1404366058080022500245"}'
1403
+ json = StringIO(json)
1404
+ result = read_json(json, typ="series")
1405
+ expected = Series(1.404366e21, index=["articleId"])
1406
+ tm.assert_series_equal(result, expected)
1407
+
1408
+ json = '{"0": {"articleId": "1404366058080022500245"}}'
1409
+ json = StringIO(json)
1410
+ result = read_json(json)
1411
+ expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
1412
+ tm.assert_frame_equal(result, expected)
1413
+
1414
+ def test_to_jsonl(self):
1415
+ # GH9180
1416
+ df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
1417
+ result = df.to_json(orient="records", lines=True)
1418
+ expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
1419
+ assert result == expected
1420
+
1421
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
1422
+ result = df.to_json(orient="records", lines=True)
1423
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
1424
+ assert result == expected
1425
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
1426
+
1427
+ # GH15096: escaped characters in columns and data
1428
+ df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
1429
+ result = df.to_json(orient="records", lines=True)
1430
+ expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
1431
+ assert result == expected
1432
+
1433
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
1434
+
1435
+ # TODO: there is a near-identical test for pytables; can we share?
1436
+ @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)
1437
+ @pytest.mark.parametrize(
1438
+ "val",
1439
+ [
1440
+ [b"E\xc9, 17", b"", b"a", b"b", b"c"],
1441
+ [b"E\xc9, 17", b"a", b"b", b"c"],
1442
+ [b"EE, 17", b"", b"a", b"b", b"c"],
1443
+ [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
1444
+ [b"", b"a", b"b", b"c"],
1445
+ [b"\xf8\xfc", b"a", b"b", b"c"],
1446
+ [b"A\xf8\xfc", b"", b"a", b"b", b"c"],
1447
+ [np.nan, b"", b"b", b"c"],
1448
+ [b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
1449
+ ],
1450
+ )
1451
+ @pytest.mark.parametrize("dtype", ["category", object])
1452
+ def test_latin_encoding(self, dtype, val):
1453
+ # GH 13774
1454
+ ser = Series(
1455
+ [x.decode("latin-1") if isinstance(x, bytes) else x for x in val],
1456
+ dtype=dtype,
1457
+ )
1458
+ encoding = "latin-1"
1459
+ with tm.ensure_clean("test.json") as path:
1460
+ ser.to_json(path, encoding=encoding)
1461
+             retr = read_json(path, encoding=encoding)
+             tm.assert_series_equal(ser, retr, check_categorical=False)
+
+     def test_data_frame_size_after_to_json(self):
+         # GH15344
+         df = DataFrame({"a": [str(1)]})
+
+         size_before = df.memory_usage(index=True, deep=True).sum()
+         df.to_json()
+         size_after = df.memory_usage(index=True, deep=True).sum()
+
+         assert size_before == size_after
+
+     @pytest.mark.parametrize(
+         "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
+     )
+     @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
+     def test_from_json_to_json_table_index_and_columns(self, index, columns):
+         # GH25433 GH25435
+         expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
+         dfjson = expected.to_json(orient="table")
+
+         result = read_json(StringIO(dfjson), orient="table")
+         tm.assert_frame_equal(result, expected)
+
+     def test_from_json_to_json_table_dtypes(self):
+         # GH21345
+         expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+         dfjson = expected.to_json(orient="table")
+         result = read_json(StringIO(dfjson), orient="table")
+         tm.assert_frame_equal(result, expected)
+
+     # TODO: We are casting to string which coerces None to NaN before casting back
+     # to object, ending up with incorrect na values
+     @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion")
+     @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
+     def test_to_json_from_json_columns_dtypes(self, orient):
+         # GH21892 GH33205
+         expected = DataFrame.from_dict(
+             {
+                 "Integer": Series([1, 2, 3], dtype="int64"),
+                 "Float": Series([None, 2.0, 3.0], dtype="float64"),
+                 "Object": Series([None, "", "c"], dtype="object"),
+                 "Bool": Series([True, False, True], dtype="bool"),
+                 "Category": Series(["a", "b", None], dtype="category"),
+                 "Datetime": Series(
+                     ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]"
+                 ),
+             }
+         )
+         dfjson = expected.to_json(orient=orient)
+
+         result = read_json(
+             StringIO(dfjson),
+             orient=orient,
+             dtype={
+                 "Integer": "int64",
+                 "Float": "float64",
+                 "Object": "object",
+                 "Bool": "bool",
+                 "Category": "category",
+                 "Datetime": "datetime64[ns]",
+             },
+         )
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
+     def test_read_json_table_dtype_raises(self, dtype):
+         # GH21345
+         df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+         dfjson = df.to_json(orient="table")
+         msg = "cannot pass both dtype and orient='table'"
+         with pytest.raises(ValueError, match=msg):
+             read_json(dfjson, orient="table", dtype=dtype)
+
+     @pytest.mark.parametrize("orient", ["index", "columns", "records", "values"])
+     def test_read_json_table_empty_axes_dtype(self, orient):
+         # GH28558
+
+         expected = DataFrame()
+         result = read_json(StringIO("{}"), orient=orient, convert_axes=True)
+         tm.assert_index_equal(result.index, expected.index)
+         tm.assert_index_equal(result.columns, expected.columns)
+
+     def test_read_json_table_convert_axes_raises(self):
+         # GH25433 GH25435
+         df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
+         dfjson = df.to_json(orient="table")
+         msg = "cannot pass both convert_axes and orient='table'"
+         with pytest.raises(ValueError, match=msg):
+             read_json(dfjson, orient="table", convert_axes=True)
+
+     @pytest.mark.parametrize(
+         "data, expected",
+         [
+             (
+                 DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
+                 {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+             ),
+             (
+                 DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
+                 {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+             ),
+             (
+                 DataFrame(
+                     [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
+                 ),
+                 {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+             ),
+             (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
+             (
+                 Series([1, 2, 3], name="A").rename_axis("foo"),
+                 {"name": "A", "data": [1, 2, 3]},
+             ),
+             (
+                 Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
+                 {"name": "A", "data": [1, 2]},
+             ),
+         ],
+     )
+     def test_index_false_to_json_split(self, data, expected):
+         # GH 17394
+         # Testing index=False in to_json with orient='split'
+
+         result = data.to_json(orient="split", index=False)
+         result = json.loads(result)
+
+         assert result == expected
+
+     @pytest.mark.parametrize(
+         "data",
+         [
+             (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
+             (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
+             (
+                 DataFrame(
+                     [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
+                 )
+             ),
+             (Series([1, 2, 3], name="A")),
+             (Series([1, 2, 3], name="A").rename_axis("foo")),
+             (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
+         ],
+     )
+     def test_index_false_to_json_table(self, data):
+         # GH 17394
+         # Testing index=False in to_json with orient='table'
+
+         result = data.to_json(orient="table", index=False)
+         result = json.loads(result)
+
+         expected = {
+             "schema": pd.io.json.build_table_schema(data, index=False),
+             "data": DataFrame(data).to_dict(orient="records"),
+         }
+
+         assert result == expected
+
+     @pytest.mark.parametrize("orient", ["index", "columns"])
+     def test_index_false_error_to_json(self, orient):
+         # GH 17394, 25513
+         # Testing error message from to_json with index=False
+
+         df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
+
+         msg = (
+             "'index=False' is only valid when 'orient' is 'split', "
+             "'table', 'records', or 'values'"
+         )
+         with pytest.raises(ValueError, match=msg):
+             df.to_json(orient=orient, index=False)
+
+     @pytest.mark.parametrize("orient", ["records", "values"])
+     def test_index_true_error_to_json(self, orient):
+         # GH 25513
+         # Testing error message from to_json with index=True
+
+         df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
+
+         msg = (
+             "'index=True' is only valid when 'orient' is 'split', "
+             "'table', 'index', or 'columns'"
+         )
+         with pytest.raises(ValueError, match=msg):
+             df.to_json(orient=orient, index=True)
+
+     @pytest.mark.parametrize("orient", ["split", "table"])
+     @pytest.mark.parametrize("index", [True, False])
+     def test_index_false_from_json_to_json(self, orient, index):
+         # GH25170
+         # Test index=False in from_json to_json
+         expected = DataFrame({"a": [1, 2], "b": [3, 4]})
+         dfjson = expected.to_json(orient=orient, index=index)
+         result = read_json(StringIO(dfjson), orient=orient)
+         tm.assert_frame_equal(result, expected)
+
+     def test_read_timezone_information(self):
+         # GH 25546
+         result = read_json(
+             StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"
+         )
+         exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]")
+         expected = Series([88], index=exp_dti)
+         tm.assert_series_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "url",
+         [
+             "s3://example-fsspec/",
+             "gcs://another-fsspec/file.json",
+             "https://example-site.com/data",
+             "some-protocol://data.txt",
+         ],
+     )
+     def test_read_json_with_url_value(self, url):
+         # GH 36271
+         result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}'))
+         expected = DataFrame({"url": [url]})
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "compression",
+         ["", ".gz", ".bz2", ".tar"],
+     )
+     def test_read_json_with_very_long_file_path(self, compression):
+         # GH 46718
+         long_json_path = f'{"a" * 1000}.json{compression}'
+         with pytest.raises(
+             FileNotFoundError, match=f"File {long_json_path} does not exist"
+         ):
+             # path too long for Windows is handled in file_exists() but raises in
+             # _get_data_from_filepath()
+             read_json(long_json_path)
+
+     @pytest.mark.parametrize(
+         "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
+     )
+     def test_timedelta_as_label(self, date_format, key):
+         df = DataFrame([[1]], columns=[pd.Timedelta("1D")])
+         expected = f'{{"{key}":{{"0":1}}}}'
+         result = df.to_json(date_format=date_format)
+
+         assert result == expected
+
+     @pytest.mark.parametrize(
+         "orient,expected",
+         [
+             ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
+             ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
+             # TODO: the below have separate encoding procedures
+             pytest.param(
+                 "split",
+                 "",
+                 marks=pytest.mark.xfail(
+                     reason="Produces JSON but not in a consistent manner"
+                 ),
+             ),
+             pytest.param(
+                 "table",
+                 "",
+                 marks=pytest.mark.xfail(
+                     reason="Produces JSON but not in a consistent manner"
+                 ),
+             ),
+         ],
+     )
+     def test_tuple_labels(self, orient, expected):
+         # GH 20500
+         df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
+         result = df.to_json(orient=orient)
+         assert result == expected
+
+     @pytest.mark.parametrize("indent", [1, 2, 4])
+     def test_to_json_indent(self, indent):
+         # GH 12004
+         df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+
+         result = df.to_json(indent=indent)
+         spaces = " " * indent
+         expected = f"""{{
+ {spaces}"a":{{
+ {spaces}{spaces}"0":"foo",
+ {spaces}{spaces}"1":"baz"
+ {spaces}}},
+ {spaces}"b":{{
+ {spaces}{spaces}"0":"bar",
+ {spaces}{spaces}"1":"qux"
+ {spaces}}}
+ }}"""
+
+         assert result == expected
+
+     @pytest.mark.skipif(
+         using_pyarrow_string_dtype(),
+         reason="Adjust expected when infer_string is default, no bug here, "
+         "just a complicated parametrization",
+     )
+     @pytest.mark.parametrize(
+         "orient,expected",
+         [
+             (
+                 "split",
+                 """{
+     "columns":[
+         "a",
+         "b"
+     ],
+     "index":[
+         0,
+         1
+     ],
+     "data":[
+         [
+             "foo",
+             "bar"
+         ],
+         [
+             "baz",
+             "qux"
+         ]
+     ]
+ }""",
+             ),
+             (
+                 "records",
+                 """[
+     {
+         "a":"foo",
+         "b":"bar"
+     },
+     {
+         "a":"baz",
+         "b":"qux"
+     }
+ ]""",
+             ),
+             (
+                 "index",
+                 """{
+     "0":{
+         "a":"foo",
+         "b":"bar"
+     },
+     "1":{
+         "a":"baz",
+         "b":"qux"
+     }
+ }""",
+             ),
+             (
+                 "columns",
+                 """{
+     "a":{
+         "0":"foo",
+         "1":"baz"
+     },
+     "b":{
+         "0":"bar",
+         "1":"qux"
+     }
+ }""",
+             ),
+             (
+                 "values",
+                 """[
+     [
+         "foo",
+         "bar"
+     ],
+     [
+         "baz",
+         "qux"
+     ]
+ ]""",
+             ),
+             (
+                 "table",
+                 """{
+     "schema":{
+         "fields":[
+             {
+                 "name":"index",
+                 "type":"integer"
+             },
+             {
+                 "name":"a",
+                 "type":"string"
+             },
+             {
+                 "name":"b",
+                 "type":"string"
+             }
+         ],
+         "primaryKey":[
+             "index"
+         ],
+         "pandas_version":"1.4.0"
+     },
+     "data":[
+         {
+             "index":0,
+             "a":"foo",
+             "b":"bar"
+         },
+         {
+             "index":1,
+             "a":"baz",
+             "b":"qux"
+         }
+     ]
+ }""",
+             ),
+         ],
+     )
+     def test_json_indent_all_orients(self, orient, expected):
+         # GH 12004
+         df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+         result = df.to_json(orient=orient, indent=4)
+         assert result == expected
+
+     def test_json_negative_indent_raises(self):
+         with pytest.raises(ValueError, match="must be a nonnegative integer"):
+             DataFrame().to_json(indent=-1)
+
+     def test_emca_262_nan_inf_support(self):
+         # GH 12213
+         data = StringIO(
+             '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
+         )
+         result = read_json(data)
+         expected = DataFrame(
+             ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
+         )
+         tm.assert_frame_equal(result, expected)
+
+     def test_frame_int_overflow(self):
+         # GH 30320
+         encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
+         expected = DataFrame({"col": ["31900441201190696999", "Text"]})
+         result = read_json(StringIO(encoded_json))
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize(
+         "dataframe,expected",
+         [
+             (
+                 DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
+                 '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
+                 '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
+             )
+         ],
+     )
+     def test_json_multiindex(self, dataframe, expected):
+         series = dataframe.stack(future_stack=True)
+         result = series.to_json(orient="index")
+         assert result == expected
+
+     @pytest.mark.single_cpu
+     def test_to_s3(self, s3_public_bucket, s3so):
+         # GH 28375
+         mock_bucket_name, target_file = s3_public_bucket.name, "test.json"
+         df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
+         df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
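+         # poll moto for up to ~5 seconds for the uploaded key to appear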
+         timeout = 5
+         while True:
+             if target_file in (obj.key for obj in s3_public_bucket.objects.all()):
+                 break
+             time.sleep(0.1)
+             timeout -= 0.1
+             assert timeout > 0, "Timed out waiting for file to appear on moto"
+
+     def test_json_pandas_nulls(self, nulls_fixture, request):
+         # GH 31615
+         if isinstance(nulls_fixture, Decimal):
+             mark = pytest.mark.xfail(reason="not implemented")
+             request.applymarker(mark)
+
+         result = DataFrame([[nulls_fixture]]).to_json()
+         assert result == '{"0":{"0":null}}'
+
+     def test_readjson_bool_series(self):
+         # GH31464
+         result = read_json(StringIO("[true, true, false]"), typ="series")
+         expected = Series([True, True, False])
+         tm.assert_series_equal(result, expected)
+
+     def test_to_json_multiindex_escape(self):
+         # GH 15273
+         df = DataFrame(
+             True,
+             index=date_range("2017-01-20", "2017-01-23"),
+             columns=["foo", "bar"],
+         ).stack(future_stack=True)
+         result = df.to_json()
+         expected = (
+             "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true,"
+             "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true,"
+             "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true,"
+             "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true,"
+             "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true,"
+             "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true,"
+             "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true,"
+             "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}"
+         )
+         assert result == expected
+
+     def test_to_json_series_of_objects(self):
+         class _TestObject:
+             def __init__(self, a, b, _c, d) -> None:
+                 self.a = a
+                 self.b = b
+                 self._c = _c
+                 self.d = d
+
+             def e(self):
+                 return 5
+
+         # JSON keys should be all non-callable non-underscore attributes, see GH-42768
+         series = Series([_TestObject(a=1, b=2, _c=3, d=4)])
+         assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}}
+
+     @pytest.mark.parametrize(
+         "data,expected",
+         [
+             (
+                 Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}),
+                 '{"0":{"imag":8.0,"real":-6.0},'
+                 '"1":{"imag":1.0,"real":0.0},'
+                 '"2":{"imag":-5.0,"real":9.0}}',
+             ),
+             (
+                 Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}),
+                 '{"0":{"imag":0.66,"real":-9.39},'
+                 '"1":{"imag":9.32,"real":3.95},'
+                 '"2":{"imag":-0.17,"real":4.03}}',
+             ),
+             (
+                 DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]),
+                 '{"0":{"0":{"imag":3.0,"real":-2.0},'
+                 '"1":{"imag":-3.0,"real":4.0}},'
+                 '"1":{"0":{"imag":0.0,"real":-1.0},'
+                 '"1":{"imag":-10.0,"real":0.0}}}',
+             ),
+             (
+                 DataFrame(
+                     [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]]
+                 ),
+                 '{"0":{"0":{"imag":0.34,"real":-0.28},'
+                 '"1":{"imag":-0.34,"real":0.41}},'
+                 '"1":{"0":{"imag":-0.39,"real":-1.08},'
+                 '"1":{"imag":-1.35,"real":-0.78}}}',
+             ),
+         ],
+     )
+     def test_complex_data_tojson(self, data, expected):
+         # GH41174
+         result = data.to_json()
+         assert result == expected
+
+     def test_json_uint64(self):
+         # GH21073
+         expected = (
+             '{"columns":["col1"],"index":[0,1],'
+             '"data":[[13342205958987758245],[12388075603347835679]]}'
+         )
+         df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]})
+         result = df.to_json(orient="split")
+         assert result == expected
+
+     @pytest.mark.parametrize(
+         "orient", ["split", "records", "values", "index", "columns"]
+     )
+     def test_read_json_dtype_backend(
+         self, string_storage, dtype_backend, orient, using_infer_string
+     ):
+         # GH#50750
+         pa = pytest.importorskip("pyarrow")
+         df = DataFrame(
+             {
+                 "a": Series([1, np.nan, 3], dtype="Int64"),
+                 "b": Series([1, 2, 3], dtype="Int64"),
+                 "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
+                 "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
+                 "e": [True, False, None],
+                 "f": [True, False, True],
+                 "g": ["a", "b", "c"],
+                 "h": ["a", "b", None],
+             }
+         )
+
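+         # which expected string array to build depends on infer_string,
+         # string_storage and dtype_backend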
+         if using_infer_string:
+             string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None]))
+         elif string_storage == "python":
+             string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
+             string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+
+         elif dtype_backend == "pyarrow":
+             pa = pytest.importorskip("pyarrow")
+             from pandas.arrays import ArrowExtensionArray
+
+             string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+
+         else:
+             string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+             string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+
+         out = df.to_json(orient=orient)
+         with pd.option_context("mode.string_storage", string_storage):
+             result = read_json(
+                 StringIO(out), dtype_backend=dtype_backend, orient=orient
+             )
+
+         expected = DataFrame(
+             {
+                 "a": Series([1, np.nan, 3], dtype="Int64"),
+                 "b": Series([1, 2, 3], dtype="Int64"),
+                 "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
+                 "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
+                 "e": Series([True, False, NA], dtype="boolean"),
+                 "f": Series([True, False, True], dtype="boolean"),
+                 "g": string_array,
+                 "h": string_array_na,
+             }
+         )
+
+         if dtype_backend == "pyarrow":
+             from pandas.arrays import ArrowExtensionArray
+
+             expected = DataFrame(
+                 {
+                     col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
+                     for col in expected.columns
+                 }
+             )
+
+         if orient == "values":
+             expected.columns = list(range(8))
+
+         tm.assert_frame_equal(result, expected)
+
+     @pytest.mark.parametrize("orient", ["split", "records", "index"])
+     def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
+         # GH#50750
+         pa = pytest.importorskip("pyarrow")
+         ser = Series([1, np.nan, 3], dtype="Int64")
+
+         out = ser.to_json(orient=orient)
+         with pd.option_context("mode.string_storage", string_storage):
+             result = read_json(
+                 StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series"
+             )
+
+         expected = Series([1, np.nan, 3], dtype="Int64")
+
+         if dtype_backend == "pyarrow":
+             from pandas.arrays import ArrowExtensionArray
+
+             expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True)))
+
+         tm.assert_series_equal(result, expected)
+
+     def test_invalid_dtype_backend(self):
+         msg = (
+             "dtype_backend numpy is invalid, only 'numpy_nullable' and "
+             "'pyarrow' are allowed."
+         )
+         with pytest.raises(ValueError, match=msg):
+             read_json("test", dtype_backend="numpy")
+
+
+ def test_invalid_engine():
+     # GH 48893
+     ser = Series(range(1))
+     out = ser.to_json()
+     with pytest.raises(ValueError, match="The engine type foo"):
+         read_json(out, engine="foo")
+
+
+ def test_pyarrow_engine_lines_false():
+     # GH 48893
+     ser = Series(range(1))
+     out = ser.to_json()
+     with pytest.raises(ValueError, match="currently pyarrow engine only supports"):
+         read_json(out, engine="pyarrow", lines=False)
+
+
+ def test_json_roundtrip_string_inference(orient):
+     pytest.importorskip("pyarrow")
+     df = DataFrame(
+         [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
+     )
+     out = df.to_json()
+     with pd.option_context("future.infer_string", True):
+         result = read_json(StringIO(out))
+     expected = DataFrame(
+         [["a", "b"], ["c", "d"]],
+         dtype="string[pyarrow_numpy]",
+         index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"),
+         columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"),
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_json_pos_args_deprecation():
+     # GH-54229
+     df = DataFrame({"a": [1, 2, 3]})
+     msg = (
+         r"Starting with pandas version 3.0 all arguments of to_json except for the "
+         r"argument 'path_or_buf' will be keyword-only."
+     )
+     with tm.assert_produces_warning(FutureWarning, match=msg):
+         buf = BytesIO()
+         df.to_json(buf, "split")
+
+
+ @td.skip_if_no("pyarrow")
+ def test_to_json_ea_null():
+     # GH#57224
+     df = DataFrame(
+         {
+             "a": Series([1, NA], dtype="int64[pyarrow]"),
+             "b": Series([2, NA], dtype="Int64"),
+         }
+     )
+     result = df.to_json(orient="records", lines=True)
+     expected = """{"a":1,"b":2}
+ {"a":null,"b":null}
+ """
+     assert result == expected
+
+
+ def test_read_json_lines_rangeindex():
+     # GH 57429
+     data = """
+ {"a": 1, "b": 2}
+ {"a": 3, "b": 4}
+ """
+     result = read_json(StringIO(data), lines=True).index
+     expected = RangeIndex(2)
+     tm.assert_index_equal(result, expected, exact=True)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py ADDED
@@ -0,0 +1,543 @@
+ from collections.abc import Iterator
+ from io import StringIO
+ from pathlib import Path
+
+ import numpy as np
+ import pytest
+
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+     read_json,
+ )
+ import pandas._testing as tm
+
+ from pandas.io.json._json import JsonReader
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+
+ @pytest.fixture
+ def lines_json_df():
+     df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+     return df.to_json(lines=True, orient="records")
+
+
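+ # tests that take the "engine" fixture run under both the ujson and pyarrow parsers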
+ @pytest.fixture(params=["ujson", "pyarrow"])
+ def engine(request):
+     if request.param == "pyarrow":
+         pytest.importorskip("pyarrow.json")
+     return request.param
+
+
+ def test_read_jsonl():
+     # GH9180
+     result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
+     expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_read_jsonl_engine_pyarrow(datapath, engine):
+     result = read_json(
+         datapath("io", "json", "data", "line_delimited.json"),
+         lines=True,
+         engine=engine,
+     )
+     expected = DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]})
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_read_datetime(request, engine):
+     # GH33787
+     if engine == "pyarrow":
+         # GH 48893
+         reason = "Pyarrow only supports a file path as an input and line delimited json"
+         request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+     df = DataFrame(
+         [([1, 2], ["2020-03-05", "2020-04-08T09:58:49+00:00"], "hector")],
+         columns=["accounts", "date", "name"],
+     )
+     json_line = df.to_json(lines=True, orient="records")
+
+ if engine == "pyarrow":
66
+ result = read_json(StringIO(json_line), engine=engine)
67
+ else:
68
+ result = read_json(StringIO(json_line), engine=engine)
69
+     expected = DataFrame(
+         [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]],
+         columns=["accounts", "date", "name"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_read_jsonl_unicode_chars():
+     # GH15132: non-ascii unicode characters
+     # \u201d == RIGHT DOUBLE QUOTATION MARK
+
+     # simulate file handle
+     json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+     json = StringIO(json)
+     result = read_json(json, lines=True)
+     expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+     tm.assert_frame_equal(result, expected)
+
+     # simulate string
+     json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+     result = read_json(StringIO(json), lines=True)
+     expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_to_jsonl():
+     # GH9180
+     df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+     result = df.to_json(orient="records", lines=True)
+     expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
+     assert result == expected
+
+     df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
+     result = df.to_json(orient="records", lines=True)
+     expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
+     assert result == expected
+     tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+     # GH15096: escaped characters in columns and data
+     df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
+     result = df.to_json(orient="records", lines=True)
+     expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
+     assert result == expected
+     tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+
+ def test_to_jsonl_count_new_lines():
+     # GH36888
+     df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+     actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n")
+     expected_new_lines_count = 2
+     assert actual_new_lines_count == expected_new_lines_count
+
+
+ @pytest.mark.parametrize("chunksize", [1, 1.0])
+ def test_readjson_chunks(request, lines_json_df, chunksize, engine):
+     # Basic test that read_json(chunks=True) gives the same result as
+     # read_json(chunks=False)
+     # GH17048: memory usage when lines=True
+
+     if engine == "pyarrow":
+         # GH 48893
+         reason = (
+             "Pyarrow only supports a file path as an input and line delimited json "
+             "and doesn't support chunksize parameter."
+         )
+         request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+     unchunked = read_json(StringIO(lines_json_df), lines=True)
+     with read_json(
+         StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
+     ) as reader:
+         chunked = pd.concat(reader)
+
+     tm.assert_frame_equal(chunked, unchunked)
+
+
+ def test_readjson_chunksize_requires_lines(lines_json_df, engine):
+     msg = "chunksize can only be passed if lines=True"
+     with pytest.raises(ValueError, match=msg):
+         with read_json(
+             StringIO(lines_json_df), lines=False, chunksize=2, engine=engine
+         ) as _:
+             pass
+
+
+ def test_readjson_chunks_series(request, engine):
+     if engine == "pyarrow":
+         # GH 48893
+         reason = (
+             "Pyarrow only supports a file path as an input and line delimited json "
+             "and doesn't support chunksize parameter."
+         )
+         request.applymarker(pytest.mark.xfail(reason=reason))
+
+     # Test reading line-format JSON to Series with chunksize param
+     s = pd.Series({"A": 1, "B": 2})
+
+     strio = StringIO(s.to_json(lines=True, orient="records"))
+     unchunked = read_json(strio, lines=True, typ="Series", engine=engine)
+
+     strio = StringIO(s.to_json(lines=True, orient="records"))
+     with read_json(
+         strio, lines=True, typ="Series", chunksize=1, engine=engine
+     ) as reader:
+         chunked = pd.concat(reader)
+
+     tm.assert_series_equal(chunked, unchunked)
+
+
+ def test_readjson_each_chunk(request, lines_json_df, engine):
+     if engine == "pyarrow":
+         # GH 48893
+         reason = (
+             "Pyarrow only supports a file path as an input and line delimited json "
+             "and doesn't support chunksize parameter."
+         )
+         request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+     # Other tests check that the final result of read_json(chunksize=True)
+     # is correct. This checks the intermediate chunks.
+     with read_json(
+         StringIO(lines_json_df), lines=True, chunksize=2, engine=engine
+     ) as reader:
+         chunks = list(reader)
+     assert chunks[0].shape == (2, 2)
+     assert chunks[1].shape == (1, 2)
+
+
+ def test_readjson_chunks_from_file(request, engine):
+     if engine == "pyarrow":
+         # GH 48893
+         reason = (
+             "Pyarrow only supports a file path as an input and line delimited json "
+             "and doesn't support chunksize parameter."
+         )
+         request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+     with tm.ensure_clean("test.json") as path:
+         df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+         df.to_json(path, lines=True, orient="records")
+         with read_json(path, lines=True, chunksize=1, engine=engine) as reader:
+             chunked = pd.concat(reader)
+         unchunked = read_json(path, lines=True, engine=engine)
+         tm.assert_frame_equal(unchunked, chunked)
+
+
+ @pytest.mark.parametrize("chunksize", [None, 1])
+ def test_readjson_chunks_closes(chunksize):
+     with tm.ensure_clean("test.json") as path:
+         df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+         df.to_json(path, lines=True, orient="records")
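+         # construct JsonReader directly so the underlying handle can be inspected afterwards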
+         reader = JsonReader(
+             path,
+             orient=None,
+             typ="frame",
+             dtype=True,
+             convert_axes=True,
+             convert_dates=True,
+             keep_default_dates=True,
+             precise_float=False,
+             date_unit=None,
+             encoding=None,
+             lines=True,
+             chunksize=chunksize,
+             compression=None,
+             nrows=None,
+         )
+         with reader:
+             reader.read()
+         assert (
+             reader.handles.handle.closed
+         ), f"didn't close stream with chunksize = {chunksize}"
+
+
244
+ @pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
245
+ def test_readjson_invalid_chunksize(lines_json_df, chunksize, engine):
246
+ msg = r"'chunksize' must be an integer >=1"
247
+
248
+ with pytest.raises(ValueError, match=msg):
249
+ with read_json(
250
+ StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
251
+ ) as _:
252
+ pass
253
+
254
+
255
+ @pytest.mark.parametrize("chunksize", [None, 1, 2])
256
+ def test_readjson_chunks_multiple_empty_lines(chunksize):
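+ # Blank lines between records must be skipped, and the result must match
+ # whether the input is parsed whole or in chunks.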
257
+ j = """
258
+
259
+ {"A":1,"B":4}
260
+
261
+
262
+
263
+ {"A":2,"B":5}
264
+
265
+
266
+
267
+
268
+
269
+
270
+
271
+ {"A":3,"B":6}
272
+ """
273
+ orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
274
+ test = read_json(StringIO(j), lines=True, chunksize=chunksize)
275
+ if chunksize is not None:
276
+ with test:
277
+ test = pd.concat(test)
278
+ tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")
279
+
280
+
281
+ def test_readjson_unicode(request, monkeypatch, engine):
282
+ if engine == "pyarrow":
283
+ # GH 48893
284
+ reason = (
285
+ "Pyarrow only supports a file path as an input and line delimited json"
286
+ "and doesn't support chunksize parameter."
287
+ )
288
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
289
+
290
+ with tm.ensure_clean("test.json") as path:
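+ # Force a non-UTF-8 preferred encoding so the test catches any code path
+ # that falls back to the locale default instead of decoding UTF-8.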
291
+ monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949")
292
+ with open(path, "w", encoding="utf-8") as f:
293
+ f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')
294
+
295
+ result = read_json(path, engine=engine)
296
+ expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
297
+ tm.assert_frame_equal(result, expected)
298
+
299
+
300
+ @pytest.mark.parametrize("nrows", [1, 2])
301
+ def test_readjson_nrows(nrows, engine):
302
+ # GH 33916
303
+ # Test reading line-format JSON to a DataFrame with the nrows param
304
+ jsonl = """{"a": 1, "b": 2}
305
+ {"a": 3, "b": 4}
306
+ {"a": 5, "b": 6}
307
+ {"a": 7, "b": 8}"""
308
+ result = read_json(StringIO(jsonl), lines=True, nrows=nrows)
309
+ expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
310
+ tm.assert_frame_equal(result, expected)
311
+
312
+
313
+ @pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)])
314
+ def test_readjson_nrows_chunks(request, nrows, chunksize, engine):
315
+ # GH 33916
316
+ # Test reading line-format JSON to a DataFrame with nrows and chunksize params
317
+ if engine == "pyarrow":
318
+ # GH 48893
319
+ reason = (
320
+ "Pyarrow only supports a file path as an input and line delimited json"
321
+ "and doesn't support chunksize parameter."
322
+ )
323
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
324
+
325
+ jsonl = """{"a": 1, "b": 2}
326
+ {"a": 3, "b": 4}
327
+ {"a": 5, "b": 6}
328
+ {"a": 7, "b": 8}"""
329
+
330
+ if engine != "pyarrow":
331
+ with read_json(
332
+ StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine
333
+ ) as reader:
334
+ chunked = pd.concat(reader)
335
+ else:
336
+ with read_json(
337
+ jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine
338
+ ) as reader:
339
+ chunked = pd.concat(reader)
340
+ expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
341
+ tm.assert_frame_equal(chunked, expected)
342
+
343
+
344
+ def test_readjson_nrows_requires_lines(engine):
345
+ # GH 33916
346
+ # Test ValueError raised if nrows is set without setting lines in read_json
347
+ jsonl = """{"a": 1, "b": 2}
348
+ {"a": 3, "b": 4}
349
+ {"a": 5, "b": 6}
350
+ {"a": 7, "b": 8}"""
351
+ msg = "nrows can only be passed if lines=True"
352
+ with pytest.raises(ValueError, match=msg):
353
+ read_json(jsonl, lines=False, nrows=2, engine=engine)
354
+
355
+
356
+ def test_readjson_lines_chunks_fileurl(request, datapath, engine):
357
+ # GH 27135
358
+ # Test reading line-format JSON from file url
359
+ if engine == "pyarrow":
360
+ # GH 48893
361
+ reason = (
362
+ "Pyarrow only supports a file path as an input and line delimited json"
363
+ "and doesn't support chunksize parameter."
364
+ )
365
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
366
+
367
+ df_list_expected = [
368
+ DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
369
+ DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
370
+ DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
371
+ ]
372
+ os_path = datapath("io", "json", "data", "line_delimited.json")
373
+ file_url = Path(os_path).as_uri()
374
+ with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader:
375
+ for index, chunk in enumerate(url_reader):
376
+ tm.assert_frame_equal(chunk, df_list_expected[index])
377
+
378
+
379
+ def test_chunksize_is_incremental():
380
+ # See https://github.com/pandas-dev/pandas/issues/34548
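+ # MyReader wraps a StringIO and counts read() calls: chunked parsing
+ # should trigger many small reads rather than one bulk read.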
381
+ jsonl = (
382
+ """{"a": 1, "b": 2}
383
+ {"a": 3, "b": 4}
384
+ {"a": 5, "b": 6}
385
+ {"a": 7, "b": 8}\n"""
386
+ * 1000
387
+ )
388
+
389
+ class MyReader:
390
+ def __init__(self, contents) -> None:
391
+ self.read_count = 0
392
+ self.stringio = StringIO(contents)
393
+
394
+ def read(self, *args):
395
+ self.read_count += 1
396
+ return self.stringio.read(*args)
397
+
398
+ def __iter__(self) -> Iterator:
399
+ self.read_count += 1
400
+ return iter(self.stringio)
401
+
402
+ reader = MyReader(jsonl)
403
+ assert len(list(read_json(reader, lines=True, chunksize=100))) > 1
404
+ assert reader.read_count > 10
405
+
406
+
407
+ @pytest.mark.parametrize("orient_", ["split", "index", "table"])
408
+ def test_to_json_append_orient(orient_):
409
+ # GH 35849
410
+ # Test ValueError when orient is not 'records'
411
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
412
+ msg = (
413
+ r"mode='a' \(append\) is only supported when "
414
+ "lines is True and orient is 'records'"
415
+ )
416
+ with pytest.raises(ValueError, match=msg):
417
+ df.to_json(mode="a", orient=orient_)
418
+
419
+
420
+ def test_to_json_append_lines():
421
+ # GH 35849
422
+ # Test ValueError when lines is not True
423
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
424
+ msg = (
425
+ r"mode='a' \(append\) is only supported when "
426
+ "lines is True and orient is 'records'"
427
+ )
428
+ with pytest.raises(ValueError, match=msg):
429
+ df.to_json(mode="a", lines=False, orient="records")
430
+
431
+
432
+ @pytest.mark.parametrize("mode_", ["r", "x"])
433
+ def test_to_json_append_mode(mode_):
434
+ # GH 35849
435
+ # Test ValueError when mode is not supported option
436
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
437
+ msg = (
438
+ f"mode={mode_} is not a valid option."
439
+ "Only 'w' and 'a' are currently supported."
440
+ )
441
+ with pytest.raises(ValueError, match=msg):
442
+ df.to_json(mode=mode_, lines=False, orient="records")
443
+
444
+
445
+ def test_to_json_append_output_consistent_columns():
446
+ # GH 35849
447
+ # Testing that resulting output reads in as expected.
448
+ # Testing same columns, new rows
449
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
450
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
451
+
452
+ expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]})
453
+ with tm.ensure_clean("test.json") as path:
454
+ # Save dataframes to the same file
455
+ df1.to_json(path, lines=True, orient="records")
456
+ df2.to_json(path, mode="a", lines=True, orient="records")
457
+
458
+ # Read path file
459
+ result = read_json(path, lines=True)
460
+ tm.assert_frame_equal(result, expected)
461
+
462
+
463
+ def test_to_json_append_output_inconsistent_columns():
464
+ # GH 35849
465
+ # Testing that resulting output reads in as expected.
466
+ # Testing one new column, one old column, new rows
467
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
468
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
469
+
470
+ expected = DataFrame(
471
+ {
472
+ "col1": [1, 2, None, None],
473
+ "col2": ["a", "b", "e", "f"],
474
+ "col3": [np.nan, np.nan, "!", "#"],
475
+ }
476
+ )
477
+ with tm.ensure_clean("test.json") as path:
478
+ # Save dataframes to the same file
479
+ df1.to_json(path, mode="a", lines=True, orient="records")
480
+ df3.to_json(path, mode="a", lines=True, orient="records")
481
+
482
+ # Read path file
483
+ result = read_json(path, lines=True)
484
+ tm.assert_frame_equal(result, expected)
485
+
486
+
487
+ def test_to_json_append_output_different_columns():
488
+ # GH 35849
489
+ # Testing that resulting output reads in as expected.
490
+ # Testing same, differing and new columns
491
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
492
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
493
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
494
+ df4 = DataFrame({"col4": [True, False]})
495
+
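+ # col4 reads back as float: rows missing col4 become NaN, which upcasts
+ # the boolean column.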
496
+ expected = DataFrame(
497
+ {
498
+ "col1": [1, 2, 3, 4, None, None, None, None],
499
+ "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan],
500
+ "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan],
501
+ "col4": [None, None, None, None, None, None, True, False],
502
+ }
503
+ ).astype({"col4": "float"})
504
+ with tm.ensure_clean("test.json") as path:
505
+ # Save dataframes to the same file
506
+ df1.to_json(path, mode="a", lines=True, orient="records")
507
+ df2.to_json(path, mode="a", lines=True, orient="records")
508
+ df3.to_json(path, mode="a", lines=True, orient="records")
509
+ df4.to_json(path, mode="a", lines=True, orient="records")
510
+
511
+ # Read path file
512
+ result = read_json(path, lines=True)
513
+ tm.assert_frame_equal(result, expected)
514
+
515
+
516
+ def test_to_json_append_output_different_columns_reordered():
517
+ # GH 35849
518
+ # Testing that resulting output reads in as expected.
519
+ # Testing specific result column order.
520
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
521
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
522
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
523
+ df4 = DataFrame({"col4": [True, False]})
524
+
525
+ # df4, df3, df2, df1 (in that order)
526
+ expected = DataFrame(
527
+ {
528
+ "col4": [True, False, None, None, None, None, None, None],
529
+ "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"],
530
+ "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan],
531
+ "col1": [None, None, None, None, 3, 4, 1, 2],
532
+ }
533
+ ).astype({"col4": "float"})
534
+ with tm.ensure_clean("test.json") as path:
535
+ # Save dataframes to the same file
536
+ df4.to_json(path, mode="a", lines=True, orient="records")
537
+ df3.to_json(path, mode="a", lines=True, orient="records")
538
+ df2.to_json(path, mode="a", lines=True, orient="records")
539
+ df1.to_json(path, mode="a", lines=True, orient="records")
540
+
541
+ # Read path file
542
+ result = read_json(path, lines=True)
543
+ tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py ADDED
@@ -0,0 +1,1087 @@
1
+ import calendar
2
+ import datetime
3
+ import decimal
4
+ import json
5
+ import locale
6
+ import math
7
+ import re
8
+ import time
9
+
10
+ import dateutil
11
+ import numpy as np
12
+ import pytest
13
+ import pytz
14
+
15
+ import pandas._libs.json as ujson
16
+ from pandas.compat import IS64
17
+
18
+ from pandas import (
19
+ DataFrame,
20
+ DatetimeIndex,
21
+ Index,
22
+ NaT,
23
+ PeriodIndex,
24
+ Series,
25
+ Timedelta,
26
+ Timestamp,
27
+ date_range,
28
+ )
29
+ import pandas._testing as tm
30
+
31
+
32
+ def _clean_dict(d):
33
+ """
34
+ Sanitize dictionary for JSON by converting all keys to strings.
35
+
36
+ Parameters
37
+ ----------
38
+ d : dict
39
+ The dictionary to convert.
40
+
41
+ Returns
42
+ -------
43
+ cleaned_dict : dict
44
+ """
45
+ return {str(k): v for k, v in d.items()}
46
+
47
+
48
+ @pytest.fixture(
49
+ params=[None, "split", "records", "values", "index"] # Column indexed by default.
50
+ )
51
+ def orient(request):
52
+ return request.param
53
+
54
+
55
+ class TestUltraJSONTests:
56
+ @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
57
+ def test_encode_decimal(self):
58
+ sut = decimal.Decimal("1337.1337")
59
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
60
+ decoded = ujson.ujson_loads(encoded)
61
+ assert decoded == 1337.1337
62
+
63
+ sut = decimal.Decimal("0.95")
64
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
65
+ assert encoded == "1.0"
66
+
67
+ decoded = ujson.ujson_loads(encoded)
68
+ assert decoded == 1.0
69
+
70
+ sut = decimal.Decimal("0.94")
71
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
72
+ assert encoded == "0.9"
73
+
74
+ decoded = ujson.ujson_loads(encoded)
75
+ assert decoded == 0.9
76
+
77
+ sut = decimal.Decimal("1.95")
78
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
79
+ assert encoded == "2.0"
80
+
81
+ decoded = ujson.ujson_loads(encoded)
82
+ assert decoded == 2.0
83
+
84
+ sut = decimal.Decimal("-1.95")
85
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
86
+ assert encoded == "-2.0"
87
+
88
+ decoded = ujson.ujson_loads(encoded)
89
+ assert decoded == -2.0
90
+
91
+ sut = decimal.Decimal("0.995")
92
+ encoded = ujson.ujson_dumps(sut, double_precision=2)
93
+ assert encoded == "1.0"
94
+
95
+ decoded = ujson.ujson_loads(encoded)
96
+ assert decoded == 1.0
97
+
98
+ sut = decimal.Decimal("0.9995")
99
+ encoded = ujson.ujson_dumps(sut, double_precision=3)
100
+ assert encoded == "1.0"
101
+
102
+ decoded = ujson.ujson_loads(encoded)
103
+ assert decoded == 1.0
104
+
105
+ sut = decimal.Decimal("0.99999999999999944")
106
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
107
+ assert encoded == "1.0"
108
+
109
+ decoded = ujson.ujson_loads(encoded)
110
+ assert decoded == 1.0
111
+
112
+ @pytest.mark.parametrize("ensure_ascii", [True, False])
113
+ def test_encode_string_conversion(self, ensure_ascii):
114
+ string_input = "A string \\ / \b \f \n \r \t </script> &"
115
+ not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
116
+ html_encoded = (
117
+ '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
118
+ )
119
+
120
+ def helper(expected_output, **encode_kwargs):
121
+ output = ujson.ujson_dumps(
122
+ string_input, ensure_ascii=ensure_ascii, **encode_kwargs
123
+ )
124
+
125
+ assert output == expected_output
126
+ assert string_input == json.loads(output)
127
+ assert string_input == ujson.ujson_loads(output)
128
+
129
+ # Default behavior assumes encode_html_chars=False.
130
+ helper(not_html_encoded)
131
+
132
+ # Make sure explicit encode_html_chars=False works.
133
+ helper(not_html_encoded, encode_html_chars=False)
134
+
135
+ # Make sure explicit encode_html_chars=True does the encoding.
136
+ helper(html_encoded, encode_html_chars=True)
137
+
138
+ @pytest.mark.parametrize(
139
+ "long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388]
140
+ )
141
+ def test_double_long_numbers(self, long_number):
142
+ sut = {"a": long_number}
143
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
144
+
145
+ decoded = ujson.ujson_loads(encoded)
146
+ assert sut == decoded
147
+
148
+ def test_encode_non_c_locale(self):
149
+ lc_category = locale.LC_NUMERIC
150
+
151
+ # We just need one of these locales to work.
152
+ for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
153
+ if tm.can_set_locale(new_locale, lc_category):
154
+ with tm.set_locale(new_locale, lc_category):
155
+ assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60
156
+ assert ujson.ujson_loads("4.78", precise_float=True) == 4.78
157
+ break
158
+
159
+ def test_decimal_decode_test_precise(self):
160
+ sut = {"a": 4.56}
161
+ encoded = ujson.ujson_dumps(sut)
162
+ decoded = ujson.ujson_loads(encoded, precise_float=True)
163
+ assert sut == decoded
164
+
165
+ def test_encode_double_tiny_exponential(self):
166
+ num = 1e-40
167
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
168
+ num = 1e-100
169
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
170
+ num = -1e-45
171
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
172
+ num = -1e-145
173
+ assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num)))
174
+
175
+ @pytest.mark.parametrize("unicode_key", ["key1", "بن"])
176
+ def test_encode_dict_with_unicode_keys(self, unicode_key):
177
+ unicode_dict = {unicode_key: "value1"}
178
+ assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict))
179
+
180
+ @pytest.mark.parametrize(
181
+ "double_input", [math.pi, -math.pi] # Should work with negatives too.
182
+ )
183
+ def test_encode_double_conversion(self, double_input):
184
+ output = ujson.ujson_dumps(double_input)
185
+ assert round(double_input, 5) == round(json.loads(output), 5)
186
+ assert round(double_input, 5) == round(ujson.ujson_loads(output), 5)
187
+
188
+ def test_encode_with_decimal(self):
189
+ decimal_input = 1.0
190
+ output = ujson.ujson_dumps(decimal_input)
191
+
192
+ assert output == "1.0"
193
+
194
+ def test_encode_array_of_nested_arrays(self):
195
+ nested_input = [[[[]]]] * 20
196
+ output = ujson.ujson_dumps(nested_input)
197
+
198
+ assert nested_input == json.loads(output)
199
+ assert nested_input == ujson.ujson_loads(output)
200
+
201
+ def test_encode_array_of_doubles(self):
202
+ doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
203
+ output = ujson.ujson_dumps(doubles_input)
204
+
205
+ assert doubles_input == json.loads(output)
206
+ assert doubles_input == ujson.ujson_loads(output)
207
+
208
+ def test_double_precision(self):
209
+ double_input = 30.012345678901234
210
+ output = ujson.ujson_dumps(double_input, double_precision=15)
211
+
212
+ assert double_input == json.loads(output)
213
+ assert double_input == ujson.ujson_loads(output)
214
+
215
+ for double_precision in (3, 9):
216
+ output = ujson.ujson_dumps(double_input, double_precision=double_precision)
217
+ rounded_input = round(double_input, double_precision)
218
+
219
+ assert rounded_input == json.loads(output)
220
+ assert rounded_input == ujson.ujson_loads(output)
221
+
222
+ @pytest.mark.parametrize(
223
+ "invalid_val",
224
+ [
225
+ 20,
226
+ -1,
227
+ "9",
228
+ None,
229
+ ],
230
+ )
231
+ def test_invalid_double_precision(self, invalid_val):
232
+ double_input = 30.12345678901234567890
233
+ expected_exception = ValueError if isinstance(invalid_val, int) else TypeError
234
+ msg = (
235
+ r"Invalid value '.*' for option 'double_precision', max is '15'|"
236
+ r"an integer is required \(got type |"
237
+ r"object cannot be interpreted as an integer"
238
+ )
239
+ with pytest.raises(expected_exception, match=msg):
240
+ ujson.ujson_dumps(double_input, double_precision=invalid_val)
241
+
242
+ def test_encode_string_conversion2(self):
243
+ string_input = "A string \\ / \b \f \n \r \t"
244
+ output = ujson.ujson_dumps(string_input)
245
+
246
+ assert string_input == json.loads(output)
247
+ assert string_input == ujson.ujson_loads(output)
248
+ assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
249
+
250
+ @pytest.mark.parametrize(
251
+ "unicode_input",
252
+ ["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"],
253
+ )
254
+ def test_encode_unicode_conversion(self, unicode_input):
255
+ enc = ujson.ujson_dumps(unicode_input)
256
+ dec = ujson.ujson_loads(enc)
257
+
258
+ assert enc == json.dumps(unicode_input)
259
+ assert dec == json.loads(enc)
260
+
261
+ def test_encode_control_escaping(self):
262
+ escaped_input = "\x19"
263
+ enc = ujson.ujson_dumps(escaped_input)
264
+ dec = ujson.ujson_loads(enc)
265
+
266
+ assert escaped_input == dec
267
+ assert enc == json.dumps(escaped_input)
268
+
269
+ def test_encode_unicode_surrogate_pair(self):
270
+ surrogate_input = "\xf0\x90\x8d\x86"
271
+ enc = ujson.ujson_dumps(surrogate_input)
272
+ dec = ujson.ujson_loads(enc)
273
+
274
+ assert enc == json.dumps(surrogate_input)
275
+ assert dec == json.loads(enc)
276
+
277
+ def test_encode_unicode_4bytes_utf8(self):
278
+ four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
279
+ enc = ujson.ujson_dumps(four_bytes_input)
280
+ dec = ujson.ujson_loads(enc)
281
+
282
+ assert enc == json.dumps(four_bytes_input)
283
+ assert dec == json.loads(enc)
284
+
285
+ def test_encode_unicode_4bytes_utf8highest(self):
286
+ four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
287
+ enc = ujson.ujson_dumps(four_bytes_input)
288
+
289
+ dec = ujson.ujson_loads(enc)
290
+
291
+ assert enc == json.dumps(four_bytes_input)
292
+ assert dec == json.loads(enc)
293
+
294
+ def test_encode_unicode_error(self):
295
+ string = "'\udac0'"
296
+ msg = (
297
+ r"'utf-8' codec can't encode character '\\udac0' "
298
+ r"in position 1: surrogates not allowed"
299
+ )
300
+ with pytest.raises(UnicodeEncodeError, match=msg):
301
+ ujson.ujson_dumps([string])
302
+
303
+ def test_encode_array_in_array(self):
304
+ arr_in_arr_input = [[[[]]]]
305
+ output = ujson.ujson_dumps(arr_in_arr_input)
306
+
307
+ assert arr_in_arr_input == json.loads(output)
308
+ assert output == json.dumps(arr_in_arr_input)
309
+ assert arr_in_arr_input == ujson.ujson_loads(output)
310
+
311
+ @pytest.mark.parametrize(
312
+ "num_input",
313
+ [
314
+ 31337,
315
+ -31337, # Negative number.
316
+ -9223372036854775808, # Large negative number.
317
+ ],
318
+ )
319
+ def test_encode_num_conversion(self, num_input):
320
+ output = ujson.ujson_dumps(num_input)
321
+ assert num_input == json.loads(output)
322
+ assert output == json.dumps(num_input)
323
+ assert num_input == ujson.ujson_loads(output)
324
+
325
+ def test_encode_list_conversion(self):
326
+ list_input = [1, 2, 3, 4]
327
+ output = ujson.ujson_dumps(list_input)
328
+
329
+ assert list_input == json.loads(output)
330
+ assert list_input == ujson.ujson_loads(output)
331
+
332
+ def test_encode_dict_conversion(self):
333
+ dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
334
+ output = ujson.ujson_dumps(dict_input)
335
+
336
+ assert dict_input == json.loads(output)
337
+ assert dict_input == ujson.ujson_loads(output)
338
+
339
+ @pytest.mark.parametrize("builtin_value", [None, True, False])
340
+ def test_encode_builtin_values_conversion(self, builtin_value):
341
+ output = ujson.ujson_dumps(builtin_value)
342
+ assert builtin_value == json.loads(output)
343
+ assert output == json.dumps(builtin_value)
344
+ assert builtin_value == ujson.ujson_loads(output)
345
+
346
+ def test_encode_datetime_conversion(self):
347
+ datetime_input = datetime.datetime.fromtimestamp(time.time())
348
+ output = ujson.ujson_dumps(datetime_input, date_unit="s")
349
+ expected = calendar.timegm(datetime_input.utctimetuple())
350
+
351
+ assert int(expected) == json.loads(output)
352
+ assert int(expected) == ujson.ujson_loads(output)
353
+
354
+ def test_encode_date_conversion(self):
355
+ date_input = datetime.date.fromtimestamp(time.time())
356
+ output = ujson.ujson_dumps(date_input, date_unit="s")
357
+
358
+ tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
359
+ expected = calendar.timegm(tup)
360
+
361
+ assert int(expected) == json.loads(output)
362
+ assert int(expected) == ujson.ujson_loads(output)
363
+
364
+ @pytest.mark.parametrize(
365
+ "test",
366
+ [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)],
367
+ )
368
+ def test_encode_time_conversion_basic(self, test):
369
+ output = ujson.ujson_dumps(test)
370
+ expected = f'"{test.isoformat()}"'
371
+ assert expected == output
372
+
373
+ def test_encode_time_conversion_pytz(self):
374
+ # see gh-11473: to_json segfaults with timezone-aware datetimes
375
+ test = datetime.time(10, 12, 15, 343243, pytz.utc)
376
+ output = ujson.ujson_dumps(test)
377
+ expected = f'"{test.isoformat()}"'
378
+ assert expected == output
379
+
380
+ def test_encode_time_conversion_dateutil(self):
381
+ # see gh-11473: to_json segfaults with timezone-aware datetimes
382
+ test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
383
+ output = ujson.ujson_dumps(test)
384
+ expected = f'"{test.isoformat()}"'
385
+ assert expected == output
386
+
387
+ @pytest.mark.parametrize(
388
+ "decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf]
389
+ )
390
+ def test_encode_as_null(self, decoded_input):
391
+ assert ujson.ujson_dumps(decoded_input) == "null", "Expected null"
392
+
393
+ def test_datetime_units(self):
394
+ val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
395
+ stamp = Timestamp(val).as_unit("ns")
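+ # Timestamp._value is nanoseconds since the epoch, so each coarser
+ # date_unit divides it by another factor of 1000.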
396
+
397
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s"))
398
+ assert roundtrip == stamp._value // 10**9
399
+
400
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms"))
401
+ assert roundtrip == stamp._value // 10**6
402
+
403
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us"))
404
+ assert roundtrip == stamp._value // 10**3
405
+
406
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns"))
407
+ assert roundtrip == stamp._value
408
+
409
+ msg = "Invalid value 'foo' for option 'date_unit'"
410
+ with pytest.raises(ValueError, match=msg):
411
+ ujson.ujson_dumps(val, date_unit="foo")
412
+
413
+ def test_encode_to_utf8(self):
414
+ unencoded = "\xe6\x97\xa5\xd1\x88"
415
+
416
+ enc = ujson.ujson_dumps(unencoded, ensure_ascii=False)
417
+ dec = ujson.ujson_loads(enc)
418
+
419
+ assert enc == json.dumps(unencoded, ensure_ascii=False)
420
+ assert dec == json.loads(enc)
421
+
422
+ def test_decode_from_unicode(self):
423
+ unicode_input = '{"obj": 31337}'
424
+
425
+ dec1 = ujson.ujson_loads(unicode_input)
426
+ dec2 = ujson.ujson_loads(str(unicode_input))
427
+
428
+ assert dec1 == dec2
429
+
430
+ def test_encode_recursion_max(self):
431
+ # 8 is the max recursion depth
432
+
433
+ class O2:
434
+ member = 0
435
+
436
+ class O1:
437
+ member = 0
438
+
439
+ decoded_input = O1()
440
+ decoded_input.member = O2()
441
+ decoded_input.member.member = decoded_input
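+ # The assignment above creates a reference cycle, so encoding recurses
+ # until the depth limit trips.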
442
+
443
+ with pytest.raises(OverflowError, match="Maximum recursion level reached"):
444
+ ujson.ujson_dumps(decoded_input)
445
+
446
+ def test_decode_gibberish(self):
447
+ gibberish = "fdsa sda v9sa fdsa"
448
+ msg = "Unexpected character found when decoding 'false'"
449
+ with pytest.raises(ValueError, match=msg):
450
+ ujson.ujson_loads(gibberish)
451
+
452
+ @pytest.mark.parametrize(
453
+ "broken_json",
454
+ [
455
+ "[", # Broken array start.
456
+ "{", # Broken object start.
457
+ "]", # Broken array end.
458
+ "}", # Broken object end.
459
+ ],
460
+ )
461
+ def test_decode_broken_json(self, broken_json):
462
+ msg = "Expected object or value"
463
+ with pytest.raises(ValueError, match=msg):
464
+ ujson.ujson_loads(broken_json)
465
+
466
+ @pytest.mark.parametrize("too_big_char", ["[", "{"])
467
+ def test_decode_depth_too_big(self, too_big_char):
468
+ with pytest.raises(ValueError, match="Reached object decoding depth limit"):
469
+ ujson.ujson_loads(too_big_char * (1024 * 1024))
470
+
471
+ @pytest.mark.parametrize(
472
+ "bad_string",
473
+ [
474
+ '"TESTING', # Unterminated.
475
+ '"TESTING\\"', # Unterminated escape.
476
+ "tru", # Broken True.
477
+ "fa", # Broken False.
478
+ "n", # Broken None.
479
+ ],
480
+ )
481
+ def test_decode_bad_string(self, bad_string):
482
+ msg = (
483
+ "Unexpected character found when decoding|"
484
+ "Unmatched ''\"' when when decoding 'string'"
485
+ )
486
+ with pytest.raises(ValueError, match=msg):
487
+ ujson.ujson_loads(bad_string)
488
+
489
+ @pytest.mark.parametrize(
490
+ "broken_json, err_msg",
491
+ [
492
+ (
493
+ '{{1337:""}}',
494
+ "Key name of object must be 'string' when decoding 'object'",
495
+ ),
496
+ ('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"),
497
+ ("[[[true", "Unexpected character found when decoding array value (2)"),
498
+ ],
499
+ )
500
+ def test_decode_broken_json_leak(self, broken_json, err_msg):
501
+ for _ in range(1000):
502
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
503
+ ujson.ujson_loads(broken_json)
504
+
505
+ @pytest.mark.parametrize(
506
+ "invalid_dict",
507
+ [
508
+ "{{{{31337}}}}", # No key.
509
+ '{{{{"key":}}}}', # No value.
510
+ '{{{{"key"}}}}', # No colon or value.
511
+ ],
512
+ )
513
+ def test_decode_invalid_dict(self, invalid_dict):
514
+ msg = (
515
+ "Key name of object must be 'string' when decoding 'object'|"
516
+ "No ':' found when decoding object value|"
517
+ "Expected object or value"
518
+ )
519
+ with pytest.raises(ValueError, match=msg):
520
+ ujson.ujson_loads(invalid_dict)
521
+
522
+ @pytest.mark.parametrize(
523
+ "numeric_int_as_str", ["31337", "-31337"] # Should work with negatives.
524
+ )
525
+ def test_decode_numeric_int(self, numeric_int_as_str):
526
+ assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str)
527
+
528
+ def test_encode_null_character(self):
529
+ wrapped_input = "31337 \x00 1337"
530
+ output = ujson.ujson_dumps(wrapped_input)
531
+
532
+ assert wrapped_input == json.loads(output)
533
+ assert output == json.dumps(wrapped_input)
534
+ assert wrapped_input == ujson.ujson_loads(output)
535
+
536
+ alone_input = "\x00"
537
+ output = ujson.ujson_dumps(alone_input)
538
+
539
+ assert alone_input == json.loads(output)
540
+ assert output == json.dumps(alone_input)
541
+ assert alone_input == ujson.ujson_loads(output)
542
+ assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ")
543
+
544
+ def test_decode_null_character(self):
545
+ wrapped_input = '"31337 \\u0000 31337"'
546
+ assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input)
547
+
548
+ def test_encode_list_long_conversion(self):
549
+ long_input = [
550
+ 9223372036854775807,
551
+ 9223372036854775807,
552
+ 9223372036854775807,
553
+ 9223372036854775807,
554
+ 9223372036854775807,
555
+ 9223372036854775807,
556
+ ]
557
+ output = ujson.ujson_dumps(long_input)
558
+
559
+ assert long_input == json.loads(output)
560
+ assert long_input == ujson.ujson_loads(output)
561
+
562
+ @pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615])
563
+ def test_encode_long_conversion(self, long_input):
564
+ output = ujson.ujson_dumps(long_input)
565
+
566
+ assert long_input == json.loads(output)
567
+ assert output == json.dumps(long_input)
568
+ assert long_input == ujson.ujson_loads(output)
569
+
570
+ @pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1])
571
+ def test_dumps_ints_larger_than_maxsize(self, bigNum):
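+ # Encoding arbitrary-precision ints succeeds, but decoding goes through
+ # int64/uint64, so reading the value back overflows.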
572
+ encoding = ujson.ujson_dumps(bigNum)
573
+ assert str(bigNum) == encoding
574
+
575
+ with pytest.raises(
576
+ ValueError,
577
+ match="Value is too big|Value is too small",
578
+ ):
579
+ assert ujson.ujson_loads(encoding) == bigNum
580
+
581
+ @pytest.mark.parametrize(
582
+ "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"]
583
+ )
584
+ def test_decode_numeric_int_exp(self, int_exp):
585
+ assert ujson.ujson_loads(int_exp) == json.loads(int_exp)
586
+
587
+ def test_loads_non_str_bytes_raises(self):
588
+ msg = "a bytes-like object is required, not 'NoneType'"
589
+ with pytest.raises(TypeError, match=msg):
590
+ ujson.ujson_loads(None)
591
+
592
+ @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1])
593
+ def test_decode_number_with_32bit_sign_bit(self, val):
594
+ # Test that numbers that fit within 32 bits but would have the
595
+ # sign bit set (2**31 <= x < 2**32) are decoded properly.
596
+ doc = f'{{"id": {val}}}'
597
+ assert ujson.ujson_loads(doc)["id"] == val
598
+
599
+ def test_encode_big_escape(self):
600
+ # Make sure no Exception is raised.
601
+ for _ in range(10):
602
+ base = "\u00e5".encode()
603
+ escape_input = base * 1024 * 1024 * 2
604
+ ujson.ujson_dumps(escape_input)
605
+
606
+ def test_decode_big_escape(self):
607
+ # Make sure no Exception is raised.
608
+ for _ in range(10):
609
+ base = "\u00e5".encode()
610
+ quote = b'"'
611
+
612
+ escape_input = quote + (base * 1024 * 1024 * 2) + quote
613
+ ujson.ujson_loads(escape_input)
614
+
615
+ def test_to_dict(self):
616
+ d = {"key": 31337}
617
+
618
+ class DictTest:
619
+ def toDict(self):
620
+ return d
621
+
622
+ o = DictTest()
623
+ output = ujson.ujson_dumps(o)
624
+
625
+ dec = ujson.ujson_loads(output)
626
+ assert dec == d
627
+
628
+ def test_default_handler(self):
629
+ class _TestObject:
630
+ def __init__(self, val) -> None:
631
+ self.val = val
632
+
633
+ @property
634
+ def recursive_attr(self):
635
+ return _TestObject("recursive_attr")
636
+
637
+ def __str__(self) -> str:
638
+ return str(self.val)
639
+
640
+ msg = "Maximum recursion level reached"
641
+ with pytest.raises(OverflowError, match=msg):
642
+ ujson.ujson_dumps(_TestObject("foo"))
643
+ assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str)
644
+
645
+ def my_handler(_):
646
+ return "foobar"
647
+
648
+ assert '"foobar"' == ujson.ujson_dumps(
649
+ _TestObject("foo"), default_handler=my_handler
650
+ )
651
+
652
+ def my_handler_raises(_):
653
+ raise TypeError("I raise for anything")
654
+
655
+ with pytest.raises(TypeError, match="I raise for anything"):
656
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises)
657
+
658
+ def my_int_handler(_):
659
+ return 42
660
+
661
+ assert (
662
+ ujson.ujson_loads(
663
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler)
664
+ )
665
+ == 42
666
+ )
667
+
668
+ def my_obj_handler(_):
669
+ return datetime.datetime(2013, 2, 3)
670
+
671
+ assert ujson.ujson_loads(
672
+ ujson.ujson_dumps(datetime.datetime(2013, 2, 3))
673
+ ) == ujson.ujson_loads(
674
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler)
675
+ )
676
+
677
+ obj_list = [_TestObject("foo"), _TestObject("bar")]
678
+ assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads(
679
+ ujson.ujson_dumps(obj_list, default_handler=str)
680
+ )
681
+
682
+ def test_encode_object(self):
683
+ class _TestObject:
684
+ def __init__(self, a, b, _c, d) -> None:
685
+ self.a = a
686
+ self.b = b
687
+ self._c = _c
688
+ self.d = d
689
+
690
+ def e(self):
691
+ return 5
692
+
693
+ # JSON keys should be all non-callable non-underscore attributes, see GH-42768
694
+ test_object = _TestObject(a=1, b=2, _c=3, d=4)
695
+ assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == {
696
+ "a": 1,
697
+ "b": 2,
698
+ "d": 4,
699
+ }
700
+
701
+ def test_ujson__name__(self):
702
+ # GH 52898
703
+ assert ujson.__name__ == "pandas._libs.json"
704
+
705
+
706
+ class TestNumpyJSONTests:
707
+ @pytest.mark.parametrize("bool_input", [True, False])
708
+ def test_bool(self, bool_input):
709
+ b = bool(bool_input)
710
+ assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b
711
+
712
+ def test_bool_array(self):
713
+ bool_array = np.array(
714
+ [True, False, True, True, False, True, False, False], dtype=bool
715
+ )
716
+ output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool)
717
+ tm.assert_numpy_array_equal(bool_array, output)
718
+
719
+ def test_int(self, any_int_numpy_dtype):
720
+ klass = np.dtype(any_int_numpy_dtype).type
721
+ num = klass(1)
722
+
723
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
724
+
725
+ def test_int_array(self, any_int_numpy_dtype):
726
+ arr = np.arange(100, dtype=int)
727
+ arr_input = arr.astype(any_int_numpy_dtype)
728
+
729
+ arr_output = np.array(
730
+ ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype
731
+ )
732
+ tm.assert_numpy_array_equal(arr_input, arr_output)
733
+
734
+ def test_int_max(self, any_int_numpy_dtype):
735
+ if any_int_numpy_dtype in ("int64", "uint64") and not IS64:
736
+ pytest.skip("Cannot test 64-bit integer on 32-bit platform")
737
+
738
+ klass = np.dtype(any_int_numpy_dtype).type
739
+
740
+ # uint64 max will always overflow,
741
+ # as it's encoded to signed.
742
+ if any_int_numpy_dtype == "uint64":
743
+ num = np.iinfo("int64").max
744
+ else:
745
+ num = np.iinfo(any_int_numpy_dtype).max
746
+
747
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
748
+
749
+ def test_float(self, float_numpy_dtype):
750
+ klass = np.dtype(float_numpy_dtype).type
751
+ num = klass(256.2013)
752
+
753
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
754
+
755
+ def test_float_array(self, float_numpy_dtype):
756
+ arr = np.arange(12.5, 185.72, 1.7322, dtype=float)
757
+ float_input = arr.astype(float_numpy_dtype)
758
+
759
+ float_output = np.array(
760
+ ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)),
761
+ dtype=float_numpy_dtype,
762
+ )
763
+ tm.assert_almost_equal(float_input, float_output)
764
+
765
+ def test_float_max(self, float_numpy_dtype):
766
+ klass = np.dtype(float_numpy_dtype).type
767
+ num = klass(np.finfo(float_numpy_dtype).max / 10)
768
+
769
+ tm.assert_almost_equal(
770
+ klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num
771
+ )
772
+
773
+ def test_array_basic(self):
774
+ arr = np.arange(96)
775
+ arr = arr.reshape((2, 2, 2, 2, 3, 2))
776
+
777
+ tm.assert_numpy_array_equal(
778
+ np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
779
+ )
780
+
781
+ @pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)])
782
+ def test_array_reshaped(self, shape):
783
+ arr = np.arange(100)
784
+ arr = arr.reshape(shape)
785
+
786
+ tm.assert_numpy_array_equal(
787
+ np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
788
+ )
789
+
790
+ def test_array_list(self):
791
+ arr_list = [
792
+ "a",
793
+ [],
794
+ {},
795
+ {},
796
+ [],
797
+ 42,
798
+ 97.8,
799
+ ["a", "b"],
800
+ {"key": "val"},
801
+ ]
802
+ arr = np.array(arr_list, dtype=object)
803
+ result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object)
804
+ tm.assert_numpy_array_equal(result, arr)
805
+
806
+ def test_array_float(self):
807
+ dtype = np.float32
808
+
809
+ arr = np.arange(100.202, 200.202, 1, dtype=dtype)
810
+ arr = arr.reshape((5, 5, 4))
811
+
812
+ arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype)
813
+ tm.assert_almost_equal(arr, arr_out)
814
+
815
+ def test_0d_array(self):
816
+ # gh-18878
817
+ msg = re.escape(
818
+ "array(1) (numpy-scalar) is not JSON serializable at the moment"
819
+ )
820
+ with pytest.raises(TypeError, match=msg):
821
+ ujson.ujson_dumps(np.array(1))
822
+
823
+ def test_array_long_double(self):
824
+ msg = re.compile(
825
+ "1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment"
826
+ )
827
+ with pytest.raises(TypeError, match=msg):
828
+ ujson.ujson_dumps(np.longdouble(1234.5))
829
+
830
+
831
+ class TestPandasJSONTests:
832
+ def test_dataframe(self, orient):
833
+ dtype = np.int64
834
+
835
+ df = DataFrame(
836
+ [[1, 2, 3], [4, 5, 6]],
837
+ index=["a", "b"],
838
+ columns=["x", "y", "z"],
839
+ dtype=dtype,
840
+ )
841
+ encode_kwargs = {} if orient is None else {"orient": orient}
842
+ assert (df.dtypes == dtype).all()
843
+
844
+ output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs))
845
+ assert (df.dtypes == dtype).all()
846
+
847
+ # Ensure proper DataFrame initialization.
848
+ if orient == "split":
849
+ dec = _clean_dict(output)
850
+ output = DataFrame(**dec)
851
+ else:
852
+ output = DataFrame(output)
853
+
854
+ # Corrections to enable DataFrame comparison.
855
+ if orient == "values":
856
+ df.columns = [0, 1, 2]
857
+ df.index = [0, 1]
858
+ elif orient == "records":
859
+ df.index = [0, 1]
860
+ elif orient == "index":
861
+ df = df.transpose()
862
+
863
+ assert (df.dtypes == dtype).all()
864
+ tm.assert_frame_equal(output, df)
865
+
866
+ def test_dataframe_nested(self, orient):
867
+ df = DataFrame(
868
+ [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"]
869
+ )
870
+
871
+ nested = {"df1": df, "df2": df.copy()}
872
+ kwargs = {} if orient is None else {"orient": orient}
873
+
874
+ exp = {
875
+ "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
876
+ "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
877
+ }
878
+ assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
879
+
880
+ def test_series(self, orient):
881
+ dtype = np.int64
882
+ s = Series(
883
+ [10, 20, 30, 40, 50, 60],
884
+ name="series",
885
+ index=[6, 7, 8, 9, 10, 15],
886
+ dtype=dtype,
887
+ ).sort_values()
888
+ assert s.dtype == dtype
889
+
890
+ encode_kwargs = {} if orient is None else {"orient": orient}
891
+
892
+ output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs))
893
+ assert s.dtype == dtype
894
+
895
+ if orient == "split":
896
+ dec = _clean_dict(output)
897
+ output = Series(**dec)
898
+ else:
899
+ output = Series(output)
900
+
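+ # Normalize name/index on both sides so the comparison only checks what
+ # the given orient actually round-trips.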
901
+ if orient in (None, "index"):
902
+ s.name = None
903
+ output = output.sort_values()
904
+ s.index = ["6", "7", "8", "9", "10", "15"]
905
+ elif orient in ("records", "values"):
906
+ s.name = None
907
+ s.index = [0, 1, 2, 3, 4, 5]
908
+
909
+ assert s.dtype == dtype
910
+ tm.assert_series_equal(output, s)
911
+
912
+ def test_series_nested(self, orient):
913
+ s = Series(
914
+ [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15]
915
+ ).sort_values()
916
+ nested = {"s1": s, "s2": s.copy()}
917
+ kwargs = {} if orient is None else {"orient": orient}
918
+
919
+ exp = {
920
+ "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
921
+ "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
922
+ }
923
+ assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
924
+
925
+ def test_index(self):
926
+ i = Index([23, 45, 18, 98, 43, 11], name="index")
927
+
928
+ # Column indexed.
929
+ output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index")
930
+ tm.assert_index_equal(i, output)
931
+
932
+ dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split")))
933
+ output = Index(**dec)
934
+
935
+ tm.assert_index_equal(i, output)
936
+ assert i.name == output.name
940
+
941
+ output = Index(
942
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index"
943
+ )
944
+ tm.assert_index_equal(i, output)
945
+
946
+ output = Index(
947
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index"
948
+ )
949
+ tm.assert_index_equal(i, output)
950
+
951
+ output = Index(
952
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index"
953
+ )
954
+ tm.assert_index_equal(i, output)
955
+
956
+ def test_datetime_index(self):
957
+ date_unit = "ns"
958
+
959
+ # freq doesn't round-trip
960
+ rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None)
961
+ encoded = ujson.ujson_dumps(rng, date_unit=date_unit)
962
+
963
+ decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded)))
964
+ tm.assert_index_equal(rng, decoded)
965
+
966
+ ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
967
+ decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit)))
968
+
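+ # The decoded index comes back as integer epoch values; rebuild the
+ # DatetimeIndex before comparing against the original series.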
969
+ idx_values = decoded.index.values.astype(np.int64)
970
+ decoded.index = DatetimeIndex(idx_values)
971
+ tm.assert_series_equal(ts, decoded)
972
+
973
+ @pytest.mark.parametrize(
974
+ "invalid_arr",
975
+ [
976
+ "[31337,]", # Trailing comma.
977
+ "[,31337]", # Leading comma.
978
+ "[]]", # Unmatched bracket.
979
+ "[,]", # Only comma.
980
+ ],
981
+ )
982
+ def test_decode_invalid_array(self, invalid_arr):
983
+ msg = (
984
+ "Expected object or value|Trailing data|"
985
+ "Unexpected character found when decoding array value"
986
+ )
987
+ with pytest.raises(ValueError, match=msg):
988
+ ujson.ujson_loads(invalid_arr)
989
+
990
+ @pytest.mark.parametrize("arr", [[], [31337]])
991
+ def test_decode_array(self, arr):
992
+ assert arr == ujson.ujson_loads(str(arr))
993
+
994
+ @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808])
995
+ def test_decode_extreme_numbers(self, extreme_num):
996
+ assert extreme_num == ujson.ujson_loads(str(extreme_num))
997
+
998
+ @pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"])
999
+ def test_decode_too_extreme_numbers(self, too_extreme_num):
1000
+ with pytest.raises(
1001
+ ValueError,
1002
+ match="Value is too big|Value is too small",
1003
+ ):
1004
+ ujson.ujson_loads(too_extreme_num)
1005
+
1006
+ def test_decode_with_trailing_whitespaces(self):
1007
+ assert {} == ujson.ujson_loads("{}\n\t ")
1008
+
1009
+ def test_decode_with_trailing_non_whitespaces(self):
1010
+ with pytest.raises(ValueError, match="Trailing data"):
1011
+ ujson.ujson_loads("{}\n\t a")
1012
+
1013
+ @pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"])
1014
+ def test_decode_array_with_big_int(self, value):
1015
+ with pytest.raises(
1016
+ ValueError,
1017
+ match="Value is too big|Value is too small",
1018
+ ):
1019
+ ujson.ujson_loads(value)
1020
+
1021
+ @pytest.mark.parametrize(
1022
+ "float_number",
1023
+ [
1024
+ 1.1234567893,
1025
+ 1.234567893,
1026
+ 1.34567893,
1027
+ 1.4567893,
1028
+ 1.567893,
1029
+ 1.67893,
1030
+ 1.7893,
1031
+ 1.893,
1032
+ 1.3,
1033
+ ],
1034
+ )
1035
+ @pytest.mark.parametrize("sign", [-1, 1])
1036
+ def test_decode_floating_point(self, sign, float_number):
1037
+ float_number *= sign
1038
+ tm.assert_almost_equal(
1039
+ float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15
1040
+ )
1041
+
1042
+ def test_encode_big_set(self):
1043
+ s = set()
1044
+
1045
+ for x in range(100000):
1046
+ s.add(x)
1047
+
1048
+ # Make sure no Exception is raised.
1049
+ ujson.ujson_dumps(s)
1050
+
1051
+ def test_encode_empty_set(self):
1052
+ assert "[]" == ujson.ujson_dumps(set())
1053
+
1054
+ def test_encode_set(self):
1055
+ s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
1056
+ enc = ujson.ujson_dumps(s)
1057
+ dec = ujson.ujson_loads(enc)
1058
+
1059
+ for v in dec:
1060
+ assert v in s
1061
+
1062
+ @pytest.mark.parametrize(
1063
+ "td",
1064
+ [
1065
+ Timedelta(days=366),
1066
+ Timedelta(days=-1),
1067
+ Timedelta(hours=13, minutes=5, seconds=5),
1068
+ Timedelta(hours=13, minutes=20, seconds=30),
1069
+ Timedelta(days=-1, nanoseconds=5),
1070
+ Timedelta(nanoseconds=1),
1071
+ Timedelta(microseconds=1, nanoseconds=1),
1072
+ Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),
1073
+ Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),
1074
+ ],
1075
+ )
1076
+ def test_encode_timedelta_iso(self, td):
1077
+ # GH 28256
1078
+ result = ujson.ujson_dumps(td, iso_dates=True)
1079
+ expected = f'"{td.isoformat()}"'
1080
+
1081
+ assert result == expected
1082
+
1083
+ def test_encode_periodindex(self):
1084
+ # GH 46683
1085
+ p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
1086
+ df = DataFrame(index=p)
1087
+ assert df.to_json() == "{}"
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc ADDED
Binary file (9.45 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc ADDED
Binary file (8.6 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.5 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (412 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc ADDED
Binary file (24.3 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (4.46 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc ADDED
Binary file (2.6 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc ADDED
Binary file (4.92 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc ADDED
Binary file (8.54 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc ADDED
Binary file (11.7 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc ADDED
Binary file (3.39 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc ADDED
Binary file (10.5 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc ADDED
Binary file (799 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc ADDED
Binary file (10.8 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc ADDED
Binary file (2.33 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc ADDED
Binary file (16.2 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc ADDED
Binary file (22.1 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc ADDED
Binary file (31.5 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc ADDED
Binary file (1.71 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc ADDED
Binary file (2.3 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc ADDED
Binary file (9.48 kB).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py ADDED
@@ -0,0 +1,50 @@
1
+ from collections.abc import Generator
2
+ from contextlib import contextmanager
3
+ import pathlib
4
+ import tempfile
5
+
6
+ import pytest
7
+
8
+ from pandas.io.pytables import HDFStore
9
+
10
+ tables = pytest.importorskip("tables")
11
+ # set these parameters so we don't have file sharing
12
+ tables.parameters.MAX_NUMEXPR_THREADS = 1
13
+ tables.parameters.MAX_BLOSC_THREADS = 1
14
+ tables.parameters.MAX_THREADS = 1
15
+
16
+
17
+ def safe_close(store):
18
+ try:
19
+ if store is not None:
20
+ store.close()
21
+ except OSError:
22
+ pass
23
+
24
+
25
+ # contextmanager to ensure the file cleanup
26
+ @contextmanager
27
+ def ensure_clean_store(
28
+ path, mode="a", complevel=None, complib=None, fletcher32=False
29
+ ) -> Generator[HDFStore, None, None]:
30
+ with tempfile.TemporaryDirectory() as tmpdirname:
31
+ tmp_path = pathlib.Path(tmpdirname, path)
32
+ with HDFStore(
33
+ tmp_path,
34
+ mode=mode,
35
+ complevel=complevel,
36
+ complib=complib,
37
+ fletcher32=fletcher32,
38
+ ) as store:
39
+ yield store
40
+
41
+
42
+ def _maybe_remove(store, key):
43
+ """
44
+ For tests using tables, try removing the table to be sure there is
45
+ no content from previous tests using the same table name.
46
+ """
47
+ try:
48
+ store.remove(key)
49
+ except (ValueError, KeyError):
50
+ pass
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py ADDED
@@ -0,0 +1,9 @@
1
+ import uuid
2
+
3
+ import pytest
4
+
5
+
6
+ @pytest.fixture
7
+ def setup_path():
8
+ """Fixture for setup path"""
9
+ return f"tmp.__{uuid.uuid4()}__.h5"