diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b677fea7b784209ea57bdf57c1176501f4324fd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d80ac557dd861de931d7de9c4b57a7d34175ac7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py new file mode 100644 index 0000000000000000000000000000000000000000..4dd3eda8c995ce022e9d46b907323e79bcd679f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py @@ -0,0 +1,18 @@ +from pandas import Index +import pandas._testing as tm +from pandas.core.construction import extract_array + + +def test_extract_array_rangeindex(): + ri = Index(range(5)) + + expected = ri._values + res = extract_array(ri, extract_numpy=True, extract_range=True) + tm.assert_numpy_array_equal(res, expected) + res = extract_array(ri, extract_numpy=False, extract_range=True) + tm.assert_numpy_array_equal(res, expected) + + res = extract_array(ri, extract_numpy=True, extract_range=False) + tm.assert_index_equal(res, ri) + res = extract_array(ri, extract_numpy=False, extract_range=False) + tm.assert_index_equal(res, ri) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d360761df9c326e2422e2c5ecf3515e678d8ce79 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e654a04ffd70e24dc2e839fad8b2f6a1a1337f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6cacc4cc860d0d4c0ffe948274252daae2ee27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/conftest.py @@ -0,0 +1,242 @@ +import shlex +import subprocess +import time +import uuid + +import pytest + +from pandas.compat import ( + is_ci_environment, + is_platform_arm, + is_platform_mac, + is_platform_windows, +) +import pandas.util._test_decorators as td + +import pandas.io.common as icom +from pandas.io.parsers import read_csv + + +@pytest.fixture +def compression_to_extension(): + return {value: key for key, value in icom.extension_to_compression.items()} + + +@pytest.fixture +def tips_file(datapath): + """Path to the tips dataset""" + return datapath("io", "data", "csv", "tips.csv") + + +@pytest.fixture +def jsonl_file(datapath): + """Path to a JSONL dataset""" + return datapath("io", "parser", "data", "items.jsonl") + + +@pytest.fixture +def salaries_table(datapath): + """DataFrame with the salaries dataset""" + return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t") + + +@pytest.fixture +def feather_file(datapath): + return datapath("io", "data", "feather", "feather-0_3_1.feather") + + +@pytest.fixture +def xml_file(datapath): + return datapath("io", "data", "xml", "books.xml") + + +@pytest.fixture +def s3_base(worker_id, monkeypatch): + """ + Fixture for mocking S3 interaction. + + Sets up moto server in separate process locally + Return url for motoserver/moto CI service + """ + pytest.importorskip("s3fs") + pytest.importorskip("boto3") + + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key") + monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret") + if is_ci_environment(): + if is_platform_arm() or is_platform_mac() or is_platform_windows(): + # NOT RUN on Windows/macOS/ARM, only Ubuntu + # - subprocess in CI can cause timeouts + # - GitHub Actions do not support + # container services for the above OSs + # - CircleCI will probably hit the Docker rate pull limit + pytest.skip( + "S3 tests do not have a corresponding service in " + "Windows, macOS or ARM platforms" + ) + else: + # set in .github/workflows/unit-tests.yml + yield "http://localhost:5000" + else: + requests = pytest.importorskip("requests") + pytest.importorskip("moto") + pytest.importorskip("flask") # server mode needs flask too + + # Launching moto in server mode, i.e., as a separate process + # with an S3 endpoint on localhost + + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + endpoint_port = f"555{worker_id}" + endpoint_uri = f"http://127.0.0.1:{endpoint_port}/" + + # pipe to null to avoid logging in terminal + with subprocess.Popen( + shlex.split(f"moto_server s3 -p {endpoint_port}"), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) as proc: + timeout = 5 + while timeout > 0: + try: + # OK to go once server is accepting connections + r = requests.get(endpoint_uri) + if r.ok: + break + except Exception: + pass + timeout -= 0.1 + time.sleep(0.1) + yield endpoint_uri + + proc.terminate() + + +@pytest.fixture +def s3so(s3_base): + return {"client_kwargs": {"endpoint_url": s3_base}} + + +@pytest.fixture +def s3_resource(s3_base): + import boto3 + + s3 = boto3.resource("s3", endpoint_url=s3_base) + 
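+    # ``endpoint_url`` points this resource at the local moto server (or the
+    # CI moto service) yielded by ``s3_base``, so no call made through it
+    # ever reaches real AWS; the dummy AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY
+    # values exported there exist only to satisfy botocore's credential lookup.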
return s3 + + +@pytest.fixture +def s3_public_bucket(s3_resource): + bucket = s3_resource.Bucket(f"pandas-test-{uuid.uuid4()}") + bucket.create() + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_public_bucket_with_data( + s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_public_bucket.put_object(Key=s3_key, Body=f) + return s3_public_bucket + + +@pytest.fixture +def s3_private_bucket(s3_resource): + bucket = s3_resource.Bucket(f"cant_get_it-{uuid.uuid4()}") + bucket.create(ACL="private") + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_private_bucket_with_data( + s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_private_bucket.put_object(Key=s3_key, Body=f) + return s3_private_bucket + + +_compression_formats_params = [ + (".no_compress", None), + ("", None), + (".gz", "gzip"), + (".GZ", "gzip"), + (".bz2", "bz2"), + (".BZ2", "bz2"), + (".zip", "zip"), + (".ZIP", "zip"), + (".xz", "xz"), + (".XZ", "xz"), + pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")), + pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")), +] + + +@pytest.fixture(params=_compression_formats_params[1:]) +def compression_format(request): + return request.param + + +@pytest.fixture(params=_compression_formats_params) +def compression_ext(request): + return request.param[0] + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. 
+ + * 'python' + * 'pyarrow' + """ + return request.param diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py new file mode 100644 index 0000000000000000000000000000000000000000..db436d8283b9972819f8eff099689cf492d45a83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py @@ -0,0 +1,289 @@ +import pytest + +from pandas.errors import CSSWarning + +import pandas._testing as tm + +from pandas.io.formats.css import CSSResolver + + +def assert_resolves(css, props, inherited=None): + resolve = CSSResolver() + actual = resolve(css, inherited=inherited) + assert props == actual + + +def assert_same_resolution(css1, css2, inherited=None): + resolve = CSSResolver() + resolved1 = resolve(css1, inherited=inherited) + resolved2 = resolve(css2, inherited=inherited) + assert resolved1 == resolved2 + + +@pytest.mark.parametrize( + "name,norm,abnorm", + [ + ( + "whitespace", + "hello: world; foo: bar", + " \t hello \t :\n world \n ; \n foo: \tbar\n\n", + ), + ("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"), + ("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"), + ("empty-list", "", ";"), + ], +) +def test_css_parse_normalisation(name, norm, abnorm): + assert_same_resolution(norm, abnorm) + + +@pytest.mark.parametrize( + "invalid_css,remainder", + [ + # No colon + ("hello-world", ""), + ("border-style: solid; hello-world", "border-style: solid"), + ( + "border-style: solid; hello-world; font-weight: bold", + "border-style: solid; font-weight: bold", + ), + # Unclosed string fail + # Invalid size + ("font-size: blah", "font-size: 1em"), + ("font-size: 1a2b", "font-size: 1em"), + ("font-size: 1e5pt", "font-size: 1em"), + ("font-size: 1+6pt", "font-size: 1em"), + ("font-size: 1unknownunit", "font-size: 1em"), + ("font-size: 10", "font-size: 1em"), + ("font-size: 10 pt", "font-size: 1em"), + # Too many args + ("border-top: 1pt solid red green", "border-top: 1pt solid green"), + ], +) +def test_css_parse_invalid(invalid_css, remainder): + with tm.assert_produces_warning(CSSWarning): + assert_same_resolution(invalid_css, remainder) + + +@pytest.mark.parametrize( + "shorthand,expansions", + [ + ("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]), + ("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]), + ( + "border-width", + [ + "border-top-width", + "border-right-width", + "border-bottom-width", + "border-left-width", + ], + ), + ( + "border-color", + [ + "border-top-color", + "border-right-color", + "border-bottom-color", + "border-left-color", + ], + ), + ( + "border-style", + [ + "border-top-style", + "border-right-style", + "border-bottom-style", + "border-left-style", + ], + ), + ], +) +def test_css_side_shorthands(shorthand, expansions): + top, right, bottom, left = expansions + + assert_resolves( + f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"} + ) + + assert_resolves( + f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"} + ) + + assert_resolves( + f"{shorthand}: 1pt 4pt 2pt", + {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"}, 
+ ) + + assert_resolves( + f"{shorthand}: 1pt 4pt 2pt 0pt", + {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"}, + ) + + with tm.assert_produces_warning(CSSWarning): + assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {}) + + +@pytest.mark.parametrize( + "shorthand,sides", + [ + ("border-top", ["top"]), + ("border-right", ["right"]), + ("border-bottom", ["bottom"]), + ("border-left", ["left"]), + ("border", ["top", "right", "bottom", "left"]), + ], +) +def test_css_border_shorthand_sides(shorthand, sides): + def create_border_dict(sides, color=None, style=None, width=None): + resolved = {} + for side in sides: + if color: + resolved[f"border-{side}-color"] = color + if style: + resolved[f"border-{side}-style"] = style + if width: + resolved[f"border-{side}-width"] = width + return resolved + + assert_resolves( + f"{shorthand}: 1pt red solid", create_border_dict(sides, "red", "solid", "1pt") + ) + + +@pytest.mark.parametrize( + "prop, expected", + [ + ("1pt red solid", ("red", "solid", "1pt")), + ("red 1pt solid", ("red", "solid", "1pt")), + ("red solid 1pt", ("red", "solid", "1pt")), + ("solid 1pt red", ("red", "solid", "1pt")), + ("red solid", ("red", "solid", "1.500000pt")), + # Note: color=black is not CSS conforming + # (See https://drafts.csswg.org/css-backgrounds/#border-shorthands) + ("1pt solid", ("black", "solid", "1pt")), + ("1pt red", ("red", "none", "1pt")), + ("red", ("red", "none", "1.500000pt")), + ("1pt", ("black", "none", "1pt")), + ("solid", ("black", "solid", "1.500000pt")), + # Sizes + ("1em", ("black", "none", "12pt")), + ], +) +def test_css_border_shorthands(prop, expected): + color, style, width = expected + + assert_resolves( + f"border-left: {prop}", + { + "border-left-color": color, + "border-left-style": style, + "border-left-width": width, + }, + ) + + +@pytest.mark.parametrize( + "style,inherited,equiv", + [ + ("margin: 1px; margin: 2px", "", "margin: 2px"), + ("margin: 1px", "margin: 2px", "margin: 1px"), + ("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"), + ( + "margin: 1px; margin-top: 2px", + "", + "margin-left: 1px; margin-right: 1px; " + "margin-bottom: 1px; margin-top: 2px", + ), + ("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"), + ("margin: 1px", "margin-top: 2px", "margin: 1px"), + ( + "margin: 1px; margin-top: inherit", + "margin: 2px", + "margin: 1px; margin-top: 2px", + ), + ], +) +def test_css_precedence(style, inherited, equiv): + resolve = CSSResolver() + inherited_props = resolve(inherited) + style_props = resolve(style, inherited=inherited_props) + equiv_props = resolve(equiv) + assert style_props == equiv_props + + +@pytest.mark.parametrize( + "style,equiv", + [ + ( + "margin: 1px; margin-top: inherit", + "margin-bottom: 1px; margin-right: 1px; margin-left: 1px", + ), + ("margin-top: inherit", ""), + ("margin-top: initial", ""), + ], +) +def test_css_none_absent(style, equiv): + assert_same_resolution(style, equiv) + + +@pytest.mark.parametrize( + "size,resolved", + [ + ("xx-small", "6pt"), + ("x-small", f"{7.5:f}pt"), + ("small", f"{9.6:f}pt"), + ("medium", "12pt"), + ("large", f"{13.5:f}pt"), + ("x-large", "18pt"), + ("xx-large", "24pt"), + ("8px", "6pt"), + ("1.25pc", "15pt"), + (".25in", "18pt"), + ("02.54cm", "72pt"), + ("25.4mm", "72pt"), + ("101.6q", "72pt"), + ("101.6q", "72pt"), + ], +) +@pytest.mark.parametrize("relative_to", [None, "16pt"]) # invariant to inherited size +def test_css_absolute_font_size(size, relative_to, resolved): + if relative_to is None: + inherited = None + else: + 
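+        # absolute units must resolve to the same point size whether or not
+        # an inherited font-size is present, which is why ``relative_to`` is
+        # parametrized over both None and "16pt" above.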
inherited = {"font-size": relative_to}
+    assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
+
+
+@pytest.mark.parametrize(
+    "size,relative_to,resolved",
+    [
+        ("1em", None, "12pt"),
+        ("1.0em", None, "12pt"),
+        ("1.25em", None, "15pt"),
+        ("1em", "16pt", "16pt"),
+        ("1.0em", "16pt", "16pt"),
+        ("1.25em", "16pt", "20pt"),
+        ("1rem", "16pt", "12pt"),
+        ("1.0rem", "16pt", "12pt"),
+        ("1.25rem", "16pt", "15pt"),
+        ("100%", None, "12pt"),
+        ("125%", None, "15pt"),
+        ("100%", "16pt", "16pt"),
+        ("125%", "16pt", "20pt"),
+        ("2ex", None, "12pt"),
+        ("2.0ex", None, "12pt"),
+        ("2.50ex", None, "15pt"),
+        ("inherit", "16pt", "16pt"),
+        ("smaller", None, "10pt"),
+        ("smaller", "18pt", "15pt"),
+        ("larger", None, f"{14.4:f}pt"),
+        ("larger", "15pt", "18pt"),
+    ],
+)
+def test_css_relative_font_size(size, relative_to, resolved):
+    if relative_to is None:
+        inherited = None
+    else:
+        inherited = {"font-size": relative_to}
+    assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..0db49a73621eab7fa59a76827a50b862fad41dca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
@@ -0,0 +1,758 @@
+import io
+import os
+import sys
+from zipfile import ZipFile
+
+from _csv import Error
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    compat,
+)
+import pandas._testing as tm
+
+
+class TestToCSV:
+    def test_to_csv_with_single_column(self):
+        # see gh-18676, https://bugs.python.org/issue32255
+        #
+        # Python's CSV library adds an extraneous '""'
+        # before the newline when the NaN-value is in
+        # the first row. Otherwise, only the newline
+        # character is added. This behavior is inconsistent
+        # and was patched in https://bugs.python.org/pull_request4672.
+        df1 = DataFrame([None, 1])
+        expected1 = """\
+""
+1.0
+"""
+        with tm.ensure_clean("test.csv") as path:
+            df1.to_csv(path, header=None, index=None)
+            with open(path, encoding="utf-8") as f:
+                assert f.read() == expected1
+
+        df2 = DataFrame([1, None])
+        expected2 = """\
+1.0
+""
+"""
+        with tm.ensure_clean("test.csv") as path:
+            df2.to_csv(path, header=None, index=None)
+            with open(path, encoding="utf-8") as f:
+                assert f.read() == expected2
+
+    def test_to_csv_default_encoding(self):
+        # GH17097
+        df = DataFrame({"col": ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
+
+        with tm.ensure_clean("test.csv") as path:
+            # the default to_csv encoding is utf-8.
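+            # i.e. ``df.to_csv(path)`` here behaves like
+            # ``df.to_csv(path, encoding="utf-8")``, so the non-ASCII strings
+            # above should round-trip unchanged.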
+ df.to_csv(path) + tm.assert_frame_equal(pd.read_csv(path, index_col=0), df) + + def test_to_csv_quotechar(self): + df = DataFrame({"col": [1, 2]}) + expected = """\ +"","col" +"0","1" +"1","2" +""" + + with tm.ensure_clean("test.csv") as path: + df.to_csv(path, quoting=1) # 1=QUOTE_ALL + with open(path, encoding="utf-8") as f: + assert f.read() == expected + + expected = """\ +$$,$col$ +$0$,$1$ +$1$,$2$ +""" + + with tm.ensure_clean("test.csv") as path: + df.to_csv(path, quoting=1, quotechar="$") + with open(path, encoding="utf-8") as f: + assert f.read() == expected + + with tm.ensure_clean("test.csv") as path: + with pytest.raises(TypeError, match="quotechar"): + df.to_csv(path, quoting=1, quotechar=None) + + def test_to_csv_doublequote(self): + df = DataFrame({"col": ['a"a', '"bb"']}) + expected = '''\ +"","col" +"0","a""a" +"1","""bb""" +''' + + with tm.ensure_clean("test.csv") as path: + df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL + with open(path, encoding="utf-8") as f: + assert f.read() == expected + + with tm.ensure_clean("test.csv") as path: + with pytest.raises(Error, match="escapechar"): + df.to_csv(path, doublequote=False) # no escapechar set + + def test_to_csv_escapechar(self): + df = DataFrame({"col": ['a"a', '"bb"']}) + expected = """\ +"","col" +"0","a\\"a" +"1","\\"bb\\"" +""" + + with tm.ensure_clean("test.csv") as path: # QUOTE_ALL + df.to_csv(path, quoting=1, doublequote=False, escapechar="\\") + with open(path, encoding="utf-8") as f: + assert f.read() == expected + + df = DataFrame({"col": ["a,a", ",bb,"]}) + expected = """\ +,col +0,a\\,a +1,\\,bb\\, +""" + + with tm.ensure_clean("test.csv") as path: + df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE + with open(path, encoding="utf-8") as f: + assert f.read() == expected + + def test_csv_to_string(self): + df = DataFrame({"col": [1, 2]}) + expected_rows = [",col", "0,1", "1,2"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv() == expected + + def test_to_csv_decimal(self): + # see gh-781 + df = DataFrame({"col1": [1], "col2": ["a"], "col3": [10.1]}) + + expected_rows = [",col1,col2,col3", "0,1,a,10.1"] + expected_default = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv() == expected_default + + expected_rows = [";col1;col2;col3", "0;1;a;10,1"] + expected_european_excel = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv(decimal=",", sep=";") == expected_european_excel + + expected_rows = [",col1,col2,col3", "0,1,a,10.10"] + expected_float_format_default = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv(float_format="%.2f") == expected_float_format_default + + expected_rows = [";col1;col2;col3", "0;1;a;10,10"] + expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows) + assert ( + df.to_csv(decimal=",", sep=";", float_format="%.2f") + == expected_float_format + ) + + # see gh-11553: testing if decimal is taken into account for '0.0' + df = DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1}) + + expected_rows = ["a,b,c", "0^0,2^2,1", "1^1,3^3,1"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv(index=False, decimal="^") == expected + + # same but for an index + assert df.set_index("a").to_csv(decimal="^") == expected + + # same for a multi-index + assert df.set_index(["a", "b"]).to_csv(decimal="^") == expected + + def test_to_csv_float_format(self): + # testing if float_format is taken into account for the index + # GH 11553 + df = DataFrame({"a": [0, 1], "b": [2.2, 
3.3], "c": 1}) + + expected_rows = ["a,b,c", "0,2.20,1", "1,3.30,1"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.set_index("a").to_csv(float_format="%.2f") == expected + + # same for a multi-index + assert df.set_index(["a", "b"]).to_csv(float_format="%.2f") == expected + + def test_to_csv_na_rep(self): + # see gh-11553 + # + # Testing if NaN values are correctly represented in the index. + df = DataFrame({"a": [0, np.nan], "b": [0, 1], "c": [2, 3]}) + expected_rows = ["a,b,c", "0.0,0,2", "_,1,3"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + assert df.set_index("a").to_csv(na_rep="_") == expected + assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected + + # now with an index containing only NaNs + df = DataFrame({"a": np.nan, "b": [0, 1], "c": [2, 3]}) + expected_rows = ["a,b,c", "_,0,2", "_,1,3"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + assert df.set_index("a").to_csv(na_rep="_") == expected + assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected + + # check if na_rep parameter does not break anything when no NaN + df = DataFrame({"a": 0, "b": [0, 1], "c": [2, 3]}) + expected_rows = ["a,b,c", "0,0,2", "0,1,3"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + assert df.set_index("a").to_csv(na_rep="_") == expected + assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected + + csv = pd.Series(["a", pd.NA, "c"]).to_csv(na_rep="ZZZZZ") + expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"]) + assert expected == csv + + def test_to_csv_na_rep_nullable_string(self, nullable_string_dtype): + # GH 29975 + # Make sure full na_rep shows up when a dtype is provided + expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"]) + csv = pd.Series(["a", pd.NA, "c"], dtype=nullable_string_dtype).to_csv( + na_rep="ZZZZZ" + ) + assert expected == csv + + def test_to_csv_date_format(self): + # GH 10209 + df_sec = DataFrame({"A": pd.date_range("20130101", periods=5, freq="s")}) + df_day = DataFrame({"A": pd.date_range("20130101", periods=5, freq="d")}) + + expected_rows = [ + ",A", + "0,2013-01-01 00:00:00", + "1,2013-01-01 00:00:01", + "2,2013-01-01 00:00:02", + "3,2013-01-01 00:00:03", + "4,2013-01-01 00:00:04", + ] + expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows) + assert df_sec.to_csv() == expected_default_sec + + expected_rows = [ + ",A", + "0,2013-01-01 00:00:00", + "1,2013-01-02 00:00:00", + "2,2013-01-03 00:00:00", + "3,2013-01-04 00:00:00", + "4,2013-01-05 00:00:00", + ] + expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows) + assert df_day.to_csv(date_format="%Y-%m-%d %H:%M:%S") == expected_ymdhms_day + + expected_rows = [ + ",A", + "0,2013-01-01", + "1,2013-01-01", + "2,2013-01-01", + "3,2013-01-01", + "4,2013-01-01", + ] + expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows) + assert df_sec.to_csv(date_format="%Y-%m-%d") == expected_ymd_sec + + expected_rows = [ + ",A", + "0,2013-01-01", + "1,2013-01-02", + "2,2013-01-03", + "3,2013-01-04", + "4,2013-01-05", + ] + expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows) + assert df_day.to_csv() == expected_default_day + assert df_day.to_csv(date_format="%Y-%m-%d") == expected_default_day + + # see gh-7791 + # + # Testing if date_format parameter is taken into account + # for multi-indexed DataFrames. 
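+        # the groupby below yields a (datetime, int) MultiIndex, so the
+        # assertion exercises ``date_format`` on a datetime level of the
+        # index rather than on a regular datetime column.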
+ df_sec["B"] = 0 + df_sec["C"] = 1 + + expected_rows = ["A,B,C", "2013-01-01,0,1.0"] + expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows) + + df_sec_grouped = df_sec.groupby([pd.Grouper(key="A", freq="1h"), "B"]) + assert df_sec_grouped.mean().to_csv(date_format="%Y-%m-%d") == expected_ymd_sec + + def test_to_csv_different_datetime_formats(self): + # GH#21734 + df = DataFrame( + { + "date": pd.to_datetime("1970-01-01"), + "datetime": pd.date_range("1970-01-01", periods=2, freq="h"), + } + ) + expected_rows = [ + "date,datetime", + "1970-01-01,1970-01-01 00:00:00", + "1970-01-01,1970-01-01 01:00:00", + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv(index=False) == expected + + def test_to_csv_date_format_in_categorical(self): + # GH#40754 + ser = pd.Series(pd.to_datetime(["2021-03-27", pd.NaT], format="%Y-%m-%d")) + ser = ser.astype("category") + expected = tm.convert_rows_list_to_csv_str(["0", "2021-03-27", '""']) + assert ser.to_csv(index=False) == expected + + ser = pd.Series( + pd.date_range( + start="2021-03-27", freq="D", periods=1, tz="Europe/Berlin" + ).append(pd.DatetimeIndex([pd.NaT])) + ) + ser = ser.astype("category") + assert ser.to_csv(index=False, date_format="%Y-%m-%d") == expected + + def test_to_csv_float_ea_float_format(self): + # GH#45991 + df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"}) + df["a"] = df["a"].astype("Float64") + result = df.to_csv(index=False, float_format="%.5f") + expected = tm.convert_rows_list_to_csv_str( + ["a,b", "1.10000,c", "2.02000,c", ",c", "6.00001,c"] + ) + assert result == expected + + def test_to_csv_float_ea_no_float_format(self): + # GH#45991 + df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"}) + df["a"] = df["a"].astype("Float64") + result = df.to_csv(index=False) + expected = tm.convert_rows_list_to_csv_str( + ["a,b", "1.1,c", "2.02,c", ",c", "6.000006,c"] + ) + assert result == expected + + def test_to_csv_multi_index(self): + # see gh-6618 + df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]])) + + exp_rows = [",1", ",2", "0,1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv() == exp + + exp_rows = ["1", "2", "1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv(index=False) == exp + + df = DataFrame( + [1], + columns=pd.MultiIndex.from_arrays([[1], [2]]), + index=pd.MultiIndex.from_arrays([[1], [2]]), + ) + + exp_rows = [",,1", ",,2", "1,2,1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv() == exp + + exp_rows = ["1", "2", "1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv(index=False) == exp + + df = DataFrame([1], columns=pd.MultiIndex.from_arrays([["foo"], ["bar"]])) + + exp_rows = [",foo", ",bar", "0,1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv() == exp + + exp_rows = ["foo", "bar", "1"] + exp = tm.convert_rows_list_to_csv_str(exp_rows) + assert df.to_csv(index=False) == exp + + @pytest.mark.parametrize( + "ind,expected", + [ + ( + pd.MultiIndex(levels=[[1.0]], codes=[[0]], names=["x"]), + "x,data\n1.0,1\n", + ), + ( + pd.MultiIndex( + levels=[[1.0], [2.0]], codes=[[0], [0]], names=["x", "y"] + ), + "x,y,data\n1.0,2.0,1\n", + ), + ], + ) + def test_to_csv_single_level_multi_index(self, ind, expected, frame_or_series): + # see gh-19589 + obj = frame_or_series(pd.Series([1], ind, name="data")) + + result = obj.to_csv(lineterminator="\n", header=True) + assert result == expected + + def test_to_csv_string_array_ascii(self): + # GH 
10813 + str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}] + df = DataFrame(str_array) + expected_ascii = """\ +,names +0,"['foo', 'bar']" +1,"['baz', 'qux']" +""" + with tm.ensure_clean("str_test.csv") as path: + df.to_csv(path, encoding="ascii") + with open(path, encoding="utf-8") as f: + assert f.read() == expected_ascii + + def test_to_csv_string_array_utf8(self): + # GH 10813 + str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}] + df = DataFrame(str_array) + expected_utf8 = """\ +,names +0,"['foo', 'bar']" +1,"['baz', 'qux']" +""" + with tm.ensure_clean("unicode_test.csv") as path: + df.to_csv(path, encoding="utf-8") + with open(path, encoding="utf-8") as f: + assert f.read() == expected_utf8 + + def test_to_csv_string_with_lf(self): + # GH 20353 + data = {"int": [1, 2, 3], "str_lf": ["abc", "d\nef", "g\nh\n\ni"]} + df = DataFrame(data) + with tm.ensure_clean("lf_test.csv") as path: + # case 1: The default line terminator(=os.linesep)(PR 21406) + os_linesep = os.linesep.encode("utf-8") + expected_noarg = ( + b"int,str_lf" + + os_linesep + + b"1,abc" + + os_linesep + + b'2,"d\nef"' + + os_linesep + + b'3,"g\nh\n\ni"' + + os_linesep + ) + df.to_csv(path, index=False) + with open(path, "rb") as f: + assert f.read() == expected_noarg + with tm.ensure_clean("lf_test.csv") as path: + # case 2: LF as line terminator + expected_lf = b'int,str_lf\n1,abc\n2,"d\nef"\n3,"g\nh\n\ni"\n' + df.to_csv(path, lineterminator="\n", index=False) + with open(path, "rb") as f: + assert f.read() == expected_lf + with tm.ensure_clean("lf_test.csv") as path: + # case 3: CRLF as line terminator + # 'lineterminator' should not change inner element + expected_crlf = b'int,str_lf\r\n1,abc\r\n2,"d\nef"\r\n3,"g\nh\n\ni"\r\n' + df.to_csv(path, lineterminator="\r\n", index=False) + with open(path, "rb") as f: + assert f.read() == expected_crlf + + def test_to_csv_string_with_crlf(self): + # GH 20353 + data = {"int": [1, 2, 3], "str_crlf": ["abc", "d\r\nef", "g\r\nh\r\n\r\ni"]} + df = DataFrame(data) + with tm.ensure_clean("crlf_test.csv") as path: + # case 1: The default line terminator(=os.linesep)(PR 21406) + os_linesep = os.linesep.encode("utf-8") + expected_noarg = ( + b"int,str_crlf" + + os_linesep + + b"1,abc" + + os_linesep + + b'2,"d\r\nef"' + + os_linesep + + b'3,"g\r\nh\r\n\r\ni"' + + os_linesep + ) + df.to_csv(path, index=False) + with open(path, "rb") as f: + assert f.read() == expected_noarg + with tm.ensure_clean("crlf_test.csv") as path: + # case 2: LF as line terminator + expected_lf = b'int,str_crlf\n1,abc\n2,"d\r\nef"\n3,"g\r\nh\r\n\r\ni"\n' + df.to_csv(path, lineterminator="\n", index=False) + with open(path, "rb") as f: + assert f.read() == expected_lf + with tm.ensure_clean("crlf_test.csv") as path: + # case 3: CRLF as line terminator + # 'lineterminator' should not change inner element + expected_crlf = ( + b"int,str_crlf\r\n" + b"1,abc\r\n" + b'2,"d\r\nef"\r\n' + b'3,"g\r\nh\r\n\r\ni"\r\n' + ) + df.to_csv(path, lineterminator="\r\n", index=False) + with open(path, "rb") as f: + assert f.read() == expected_crlf + + def test_to_csv_stdout_file(self, capsys): + # GH 21561 + df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"]) + expected_rows = [",name_1,name_2", "0,foo,bar", "1,baz,qux"] + expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows) + + df.to_csv(sys.stdout, encoding="ascii") + captured = capsys.readouterr() + + assert captured.out == expected_ascii + assert not sys.stdout.closed + + @pytest.mark.xfail( + 
compat.is_platform_windows(),
+        reason=(
+            "Especially in Windows, file stream should not be passed "
+            "to csv writer without newline='' option. "
+            "(https://docs.python.org/3/library/csv.html#csv.writer)"
+        ),
+    )
+    def test_to_csv_write_to_open_file(self):
+        # GH 21696
+        df = DataFrame({"a": ["x", "y", "z"]})
+        expected = """\
+manual header
+x
+y
+z
+"""
+        with tm.ensure_clean("test.txt") as path:
+            with open(path, "w", encoding="utf-8") as f:
+                f.write("manual header\n")
+                df.to_csv(f, header=None, index=None)
+            with open(path, encoding="utf-8") as f:
+                assert f.read() == expected
+
+    def test_to_csv_write_to_open_file_with_newline_py3(self):
+        # see gh-21696
+        # see gh-20353
+        df = DataFrame({"a": ["x", "y", "z"]})
+        expected_rows = ["x", "y", "z"]
+        expected = "manual header\n" + tm.convert_rows_list_to_csv_str(expected_rows)
+        with tm.ensure_clean("test.txt") as path:
+            with open(path, "w", newline="", encoding="utf-8") as f:
+                f.write("manual header\n")
+                df.to_csv(f, header=None, index=None)
+
+            with open(path, "rb") as f:
+                assert f.read() == bytes(expected, "utf-8")
+
+    @pytest.mark.parametrize("to_infer", [True, False])
+    @pytest.mark.parametrize("read_infer", [True, False])
+    def test_to_csv_compression(
+        self, compression_only, read_infer, to_infer, compression_to_extension
+    ):
+        # see gh-15008
+        compression = compression_only
+
+        # We'll complete file extension subsequently.
+        filename = "test."
+        filename += compression_to_extension[compression]
+
+        df = DataFrame({"A": [1]})
+
+        to_compression = "infer" if to_infer else compression
+        read_compression = "infer" if read_infer else compression
+
+        with tm.ensure_clean(filename) as path:
+            df.to_csv(path, compression=to_compression)
+            result = pd.read_csv(path, index_col=0, compression=read_compression)
+            tm.assert_frame_equal(result, df)
+
+    def test_to_csv_compression_dict(self, compression_only):
+        # GH 26023
+        method = compression_only
+        df = DataFrame({"ABC": [1]})
+        filename = "to_csv_compress_as_dict."
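+        # compression *method* names and file *extensions* differ for gzip
+        # (".gz") and zstd (".zst"), hence the small translation table below;
+        # every other method matches its extension verbatim.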
+ extension = { + "gzip": "gz", + "zstd": "zst", + }.get(method, method) + filename += extension + with tm.ensure_clean(filename) as path: + df.to_csv(path, compression={"method": method}) + read_df = pd.read_csv(path, index_col=0) + tm.assert_frame_equal(read_df, df) + + def test_to_csv_compression_dict_no_method_raises(self): + # GH 26023 + df = DataFrame({"ABC": [1]}) + compression = {"some_option": True} + msg = "must have key 'method'" + + with tm.ensure_clean("out.zip") as path: + with pytest.raises(ValueError, match=msg): + df.to_csv(path, compression=compression) + + @pytest.mark.parametrize("compression", ["zip", "infer"]) + @pytest.mark.parametrize("archive_name", ["test_to_csv.csv", "test_to_csv.zip"]) + def test_to_csv_zip_arguments(self, compression, archive_name): + # GH 26023 + df = DataFrame({"ABC": [1]}) + with tm.ensure_clean("to_csv_archive_name.zip") as path: + df.to_csv( + path, compression={"method": compression, "archive_name": archive_name} + ) + with ZipFile(path) as zp: + assert len(zp.filelist) == 1 + archived_file = zp.filelist[0].filename + assert archived_file == archive_name + + @pytest.mark.parametrize( + "filename,expected_arcname", + [ + ("archive.csv", "archive.csv"), + ("archive.tsv", "archive.tsv"), + ("archive.csv.zip", "archive.csv"), + ("archive.tsv.zip", "archive.tsv"), + ("archive.zip", "archive"), + ], + ) + def test_to_csv_zip_infer_name(self, tmp_path, filename, expected_arcname): + # GH 39465 + df = DataFrame({"ABC": [1]}) + path = tmp_path / filename + df.to_csv(path, compression="zip") + with ZipFile(path) as zp: + assert len(zp.filelist) == 1 + archived_file = zp.filelist[0].filename + assert archived_file == expected_arcname + + @pytest.mark.parametrize("df_new_type", ["Int64"]) + def test_to_csv_na_rep_long_string(self, df_new_type): + # see gh-25099 + df = DataFrame({"c": [float("nan")] * 3}) + df = df.astype(df_new_type) + expected_rows = ["c", "mynull", "mynull", "mynull"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + result = df.to_csv(index=False, na_rep="mynull", encoding="ascii") + + assert expected == result + + def test_to_csv_timedelta_precision(self): + # GH 6783 + s = pd.Series([1, 1]).astype("timedelta64[ns]") + buf = io.StringIO() + s.to_csv(buf) + result = buf.getvalue() + expected_rows = [ + ",0", + "0,0 days 00:00:00.000000001", + "1,0 days 00:00:00.000000001", + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_na_rep_truncated(self): + # https://github.com/pandas-dev/pandas/issues/31447 + result = pd.Series(range(8, 12)).to_csv(na_rep="-") + expected = tm.convert_rows_list_to_csv_str([",0", "0,8", "1,9", "2,10", "3,11"]) + assert result == expected + + result = pd.Series([True, False]).to_csv(na_rep="nan") + expected = tm.convert_rows_list_to_csv_str([",0", "0,True", "1,False"]) + assert result == expected + + result = pd.Series([1.1, 2.2]).to_csv(na_rep=".") + expected = tm.convert_rows_list_to_csv_str([",0", "0,1.1", "1,2.2"]) + assert result == expected + + @pytest.mark.parametrize("errors", ["surrogatepass", "ignore", "replace"]) + def test_to_csv_errors(self, errors): + # GH 22610 + data = ["\ud800foo"] + ser = pd.Series(data, index=Index(data, dtype=object), dtype=object) + with tm.ensure_clean("test.csv") as path: + ser.to_csv(path, errors=errors) + # No use in reading back the data as it is not the same anymore + # due to the error handling + + @pytest.mark.parametrize("mode", ["wb", "w"]) + def test_to_csv_binary_handle(self, mode): + """ + 
Binary file objects should work (if 'mode' contains a 'b') or even without + it in most cases. + + GH 35058 and GH 19827 + """ + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)]), + ) + with tm.ensure_clean() as path: + with open(path, mode="w+b") as handle: + df.to_csv(handle, mode=mode) + tm.assert_frame_equal(df, pd.read_csv(path, index_col=0)) + + @pytest.mark.parametrize("mode", ["wb", "w"]) + def test_to_csv_encoding_binary_handle(self, mode): + """ + Binary file objects should honor a specified encoding. + + GH 23854 and GH 13068 with binary handles + """ + # example from GH 23854 + content = "a, b, 🐟".encode("utf-8-sig") + buffer = io.BytesIO(content) + df = pd.read_csv(buffer, encoding="utf-8-sig") + + buffer = io.BytesIO() + df.to_csv(buffer, mode=mode, encoding="utf-8-sig", index=False) + buffer.seek(0) # tests whether file handle wasn't closed + assert buffer.getvalue().startswith(content) + + # example from GH 13068 + with tm.ensure_clean() as path: + with open(path, "w+b") as handle: + DataFrame().to_csv(handle, mode=mode, encoding="utf-8-sig") + + handle.seek(0) + assert handle.read().startswith(b'\xef\xbb\xbf""') + + +def test_to_csv_iterative_compression_name(compression): + # GH 38714 + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)]), + ) + with tm.ensure_clean() as path: + df.to_csv(path, compression=compression, chunksize=1) + tm.assert_frame_equal( + pd.read_csv(path, compression=compression, index_col=0), df + ) + + +def test_to_csv_iterative_compression_buffer(compression): + # GH 38714 + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)]), + ) + with io.BytesIO() as buffer: + df.to_csv(buffer, compression=compression, chunksize=1) + buffer.seek(0) + tm.assert_frame_equal( + pd.read_csv(buffer, compression=compression, index_col=0), df + ) + assert not buffer.closed + + +def test_to_csv_pos_args_deprecation(): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_csv except for the " + r"argument 'path_or_buf' will be keyword-only." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buffer = io.BytesIO() + df.to_csv(buffer, ";") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py new file mode 100644 index 0000000000000000000000000000000000000000..927a9f4961f6ff7ae51f74aceb0cb36dc6754c21 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py @@ -0,0 +1,429 @@ +"""Tests formatting as writer-agnostic ExcelCells + +ExcelFormatter is tested implicitly in pandas/tests/io/excel +""" +import string + +import pytest + +from pandas.errors import CSSWarning + +import pandas._testing as tm + +from pandas.io.formats.excel import ( + CssExcelCell, + CSSToExcelConverter, +) + + +@pytest.mark.parametrize( + "css,expected", + [ + # FONT + # - name + ("font-family: foo,bar", {"font": {"name": "foo"}}), + ('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}), + ("font-family: foo,\nbar", {"font": {"name": "foo"}}), + ("font-family: foo, bar, baz", {"font": {"name": "foo"}}), + ("font-family: bar, foo", {"font": {"name": "bar"}}), + ("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}), + ("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}), + ('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}), + ('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}), + # - family + ("font-family: serif", {"font": {"name": "serif", "family": 1}}), + ("font-family: Serif", {"font": {"name": "serif", "family": 1}}), + ("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}), + ("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}), + ("font-family: roman, sans serif", {"font": {"name": "roman"}}), + ("font-family: roman, sansserif", {"font": {"name": "roman"}}), + ("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}), + ("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}), + # - size + ("font-size: 1em", {"font": {"size": 12}}), + ("font-size: xx-small", {"font": {"size": 6}}), + ("font-size: x-small", {"font": {"size": 7.5}}), + ("font-size: small", {"font": {"size": 9.6}}), + ("font-size: medium", {"font": {"size": 12}}), + ("font-size: large", {"font": {"size": 13.5}}), + ("font-size: x-large", {"font": {"size": 18}}), + ("font-size: xx-large", {"font": {"size": 24}}), + ("font-size: 50%", {"font": {"size": 6}}), + # - bold + ("font-weight: 100", {"font": {"bold": False}}), + ("font-weight: 200", {"font": {"bold": False}}), + ("font-weight: 300", {"font": {"bold": False}}), + ("font-weight: 400", {"font": {"bold": False}}), + ("font-weight: normal", {"font": {"bold": False}}), + ("font-weight: lighter", {"font": {"bold": False}}), + ("font-weight: bold", {"font": {"bold": True}}), + ("font-weight: bolder", {"font": {"bold": True}}), + ("font-weight: 700", {"font": {"bold": True}}), + ("font-weight: 800", {"font": {"bold": True}}), + ("font-weight: 900", {"font": {"bold": True}}), + # - italic + ("font-style: italic", {"font": {"italic": True}}), + ("font-style: oblique", {"font": {"italic": True}}), + # - underline + ("text-decoration: underline", {"font": {"underline": "single"}}), + ("text-decoration: overline", {}), + ("text-decoration: none", {}), + # - strike + ("text-decoration: line-through", {"font": {"strike": True}}), + ( + "text-decoration: underline line-through", + {"font": {"strike": True, "underline": "single"}}, + ), 
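+        # per the CSS cascade, a later ``text-decoration`` declaration
+        # replaces the earlier one wholesale, so only the strike-through
+        # survives in the following case: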
+ ( + "text-decoration: underline; text-decoration: line-through", + {"font": {"strike": True}}, + ), + # - color + ("color: red", {"font": {"color": "FF0000"}}), + ("color: #ff0000", {"font": {"color": "FF0000"}}), + ("color: #f0a", {"font": {"color": "FF00AA"}}), + # - shadow + ("text-shadow: none", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}), + ("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}), + ("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}), + ("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}), + ("text-shadow: 0px -2em", {"font": {"shadow": True}}), + # FILL + # - color, fillType + ( + "background-color: red", + {"fill": {"fgColor": "FF0000", "patternType": "solid"}}, + ), + ( + "background-color: #ff0000", + {"fill": {"fgColor": "FF0000", "patternType": "solid"}}, + ), + ( + "background-color: #f0a", + {"fill": {"fgColor": "FF00AA", "patternType": "solid"}}, + ), + # BORDER + # - style + ( + "border-style: solid", + { + "border": { + "top": {"style": "medium"}, + "bottom": {"style": "medium"}, + "left": {"style": "medium"}, + "right": {"style": "medium"}, + } + }, + ), + ( + "border-style: solid; border-width: thin", + { + "border": { + "top": {"style": "thin"}, + "bottom": {"style": "thin"}, + "left": {"style": "thin"}, + "right": {"style": "thin"}, + } + }, + ), + ( + "border-top-style: solid; border-top-width: thin", + {"border": {"top": {"style": "thin"}}}, + ), + ( + "border-top-style: solid; border-top-width: 1pt", + {"border": {"top": {"style": "thin"}}}, + ), + ("border-top-style: solid", {"border": {"top": {"style": "medium"}}}), + ( + "border-top-style: solid; border-top-width: medium", + {"border": {"top": {"style": "medium"}}}, + ), + ( + "border-top-style: solid; border-top-width: 2pt", + {"border": {"top": {"style": "medium"}}}, + ), + ( + "border-top-style: solid; border-top-width: thick", + {"border": {"top": {"style": "thick"}}}, + ), + ( + "border-top-style: solid; border-top-width: 4pt", + {"border": {"top": {"style": "thick"}}}, + ), + ( + "border-top-style: dotted", + {"border": {"top": {"style": "mediumDashDotDot"}}}, + ), + ( + "border-top-style: dotted; border-top-width: thin", + {"border": {"top": {"style": "dotted"}}}, + ), + ("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}), + ( + "border-top-style: dashed; border-top-width: thin", + {"border": {"top": {"style": "dashed"}}}, + ), + ("border-top-style: double", {"border": {"top": {"style": "double"}}}), + # - color + ( + "border-style: solid; border-color: #0000ff", + { + "border": { + "top": {"style": "medium", "color": "0000FF"}, + "right": {"style": "medium", "color": "0000FF"}, + "bottom": {"style": "medium", "color": "0000FF"}, + "left": {"style": "medium", "color": "0000FF"}, + } + }, + ), + ( + "border-top-style: double; border-top-color: blue", + {"border": {"top": {"style": "double", "color": "0000FF"}}}, + ), + ( + "border-top-style: solid; border-top-color: #06c", + {"border": {"top": {"style": "medium", "color": "0066CC"}}}, + ), + ( + "border-top-color: blue", + {"border": {"top": {"color": "0000FF", "style": "none"}}}, + ), + # ALIGNMENT + # - horizontal + ("text-align: center", {"alignment": {"horizontal": "center"}}), + ("text-align: left", {"alignment": {"horizontal": "left"}}), + 
("text-align: right", {"alignment": {"horizontal": "right"}}), + ("text-align: justify", {"alignment": {"horizontal": "justify"}}), + # - vertical + ("vertical-align: top", {"alignment": {"vertical": "top"}}), + ("vertical-align: text-top", {"alignment": {"vertical": "top"}}), + ("vertical-align: middle", {"alignment": {"vertical": "center"}}), + ("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}), + ("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}), + # - wrap_text + ("white-space: nowrap", {"alignment": {"wrap_text": False}}), + ("white-space: pre", {"alignment": {"wrap_text": False}}), + ("white-space: pre-line", {"alignment": {"wrap_text": False}}), + ("white-space: normal", {"alignment": {"wrap_text": True}}), + # NUMBER FORMAT + ("number-format: 0%", {"number_format": {"format_code": "0%"}}), + ( + "number-format: 0§[Red](0)§-§@;", + {"number_format": {"format_code": "0;[red](0);-;@"}}, # GH 46152 + ), + ], +) +def test_css_to_excel(css, expected): + convert = CSSToExcelConverter() + assert expected == convert(css) + + +def test_css_to_excel_multiple(): + convert = CSSToExcelConverter() + actual = convert( + """ + font-weight: bold; + text-decoration: underline; + color: red; + border-width: thin; + text-align: center; + vertical-align: top; + unused: something; + """ + ) + assert { + "font": {"bold": True, "underline": "single", "color": "FF0000"}, + "border": { + "top": {"style": "thin"}, + "right": {"style": "thin"}, + "bottom": {"style": "thin"}, + "left": {"style": "thin"}, + }, + "alignment": {"horizontal": "center", "vertical": "top"}, + } == actual + + +@pytest.mark.parametrize( + "css,inherited,expected", + [ + ("font-weight: bold", "", {"font": {"bold": True}}), + ("", "font-weight: bold", {"font": {"bold": True}}), + ( + "font-weight: bold", + "font-style: italic", + {"font": {"bold": True, "italic": True}}, + ), + ("font-style: normal", "font-style: italic", {"font": {"italic": False}}), + ("font-style: inherit", "", {}), + ( + "font-style: normal; font-style: inherit", + "font-style: italic", + {"font": {"italic": True}}, + ), + ], +) +def test_css_to_excel_inherited(css, inherited, expected): + convert = CSSToExcelConverter(inherited) + assert expected == convert(css) + + +@pytest.mark.parametrize( + "input_color,output_color", + ( + list(CSSToExcelConverter.NAMED_COLORS.items()) + + [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] + + [("#F0F", "FF00FF"), ("#ABC", "AABBCC")] + ), +) +def test_css_to_excel_good_colors(input_color, output_color): + # see gh-18392 + css = ( + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: {input_color}" + ) + + expected = {} + + expected["fill"] = {"patternType": "solid", "fgColor": output_color} + + expected["font"] = {"color": output_color} + + expected["border"] = { + k: {"color": output_color, "style": "none"} + for k in ("top", "right", "bottom", "left") + } + + with tm.assert_produces_warning(None): + convert = CSSToExcelConverter() + assert expected == convert(css) + + +@pytest.mark.parametrize("input_color", [None, "not-a-color"]) +def test_css_to_excel_bad_colors(input_color): + # see gh-18392 + css = ( + f"border-top-color: {input_color}; " + f"border-right-color: {input_color}; " + f"border-bottom-color: {input_color}; " + f"border-left-color: {input_color}; " + f"background-color: {input_color}; " + f"color: 
{input_color}"
+    )
+
+    expected = {}
+
+    if input_color is not None:
+        expected["fill"] = {"patternType": "solid"}
+
+    with tm.assert_produces_warning(CSSWarning):
+        convert = CSSToExcelConverter()
+        assert expected == convert(css)
+
+
+def tests_css_named_colors_valid():
+    upper_hexs = set(map(str.upper, string.hexdigits))
+    for color in CSSToExcelConverter.NAMED_COLORS.values():
+        assert len(color) == 6 and all(c in upper_hexs for c in color)
+
+
+def test_css_named_colors_from_mpl_present():
+    mpl_colors = pytest.importorskip("matplotlib.colors")
+
+    pd_colors = CSSToExcelConverter.NAMED_COLORS
+    for name, color in mpl_colors.CSS4_COLORS.items():
+        assert name in pd_colors and pd_colors[name] == color[1:]
+
+
+@pytest.mark.parametrize(
+    "styles,expected",
+    [
+        ([("color", "green"), ("color", "red")], "color: red;"),
+        ([("font-weight", "bold"), ("font-weight", "normal")], "font-weight: normal;"),
+        ([("text-align", "center"), ("TEXT-ALIGN", "right")], "text-align: right;"),
+    ],
+)
+def test_css_excel_cell_precedence(styles, expected):
+    """It favors latter declarations over former declarations"""
+    # See GH 47371
+    converter = CSSToExcelConverter()
+    converter._call_cached.cache_clear()
+    css_styles = {(0, 0): styles}
+    cell = CssExcelCell(
+        row=0,
+        col=0,
+        val="",
+        style=None,
+        css_styles=css_styles,
+        css_row=0,
+        css_col=0,
+        css_converter=converter,
+    )
+    converter._call_cached.cache_clear()
+
+    assert cell.style == converter(expected)
+
+
+@pytest.mark.parametrize(
+    "styles,cache_hits,cache_misses",
+    [
+        ([[("color", "green"), ("color", "red"), ("color", "green")]], 0, 1),
+        (
+            [
+                [("font-weight", "bold")],
+                [("font-weight", "normal"), ("font-weight", "bold")],
+            ],
+            1,
+            1,
+        ),
+        ([[("text-align", "center")], [("TEXT-ALIGN", "center")]], 1, 1),
+        (
+            [
+                [("font-weight", "bold"), ("text-align", "center")],
+                [("font-weight", "bold"), ("text-align", "left")],
+            ],
+            0,
+            2,
+        ),
+        (
+            [
+                [("font-weight", "bold"), ("text-align", "center")],
+                [("font-weight", "bold"), ("text-align", "left")],
+                [("font-weight", "bold"), ("text-align", "center")],
+            ],
+            1,
+            2,
+        ),
+    ],
+)
+def test_css_excel_cell_cache(styles, cache_hits, cache_misses):
+    """It caches unique cell styles"""
+    # See GH 47371
+    converter = CSSToExcelConverter()
+    converter._call_cached.cache_clear()
+
+    css_styles = {(0, i): _style for i, _style in enumerate(styles)}
+    for css_row, css_col in css_styles:
+        CssExcelCell(
+            row=0,
+            col=0,
+            val="",
+            style=None,
+            css_styles=css_styles,
+            css_row=css_row,
+            css_col=css_col,
+            css_converter=converter,
+        )
+    cache_info = converter._call_cached.cache_info()
+    converter._call_cached.cache_clear()
+
+    assert cache_info.hits == cache_hits
+    assert cache_info.misses == cache_misses
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bfd8eb9d51d59ef83c7a4d6fcf8bbeb1ef24025
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py
@@ -0,0 +1,342 @@
+"""
+Self-contained script to write legacy storage pickle files.
+
+To use this script, create an environment where you want to
+generate pickles, say it's for 0.20.3, with your pandas clone
+in ~/pandas
+
+.
activate pandas_0.20.3 +cd ~/pandas/pandas + +$ python -m tests.io.generate_legacy_storage_files \ + tests/io/data/legacy_pickle/0.20.3/ pickle + +This script generates a storage file for the current arch, system, +and python version + pandas version: 0.20.3 + output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ + storage format: pickle +created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle + +The idea here is you are using the *current* version of the +generate_legacy_storage_files with an *older* version of pandas to +generate a pickle file. We will then check this file into a current +branch, and test using test_pickle.py. This will load the *older* +pickles and test versus the current data that is generated +(with main). These are then compared. + +If we have cases where we changed the signature (e.g. we renamed +offset -> freq in Timestamp). Then we have to conditionally execute +in the generate_legacy_storage_files.py to make it +run under the older AND the newer version. + +""" + +from datetime import timedelta +import os +import pickle +import platform as pl +import sys + +# Remove script directory from path, otherwise Python will try to +# import the JSON test directory as the json module +sys.path.pop(0) + +import numpy as np + +import pandas +from pandas import ( + Categorical, + DataFrame, + Index, + MultiIndex, + NaT, + Period, + RangeIndex, + Series, + Timestamp, + bdate_range, + date_range, + interval_range, + period_range, + timedelta_range, +) +from pandas.arrays import SparseArray + +from pandas.tseries.offsets import ( + FY5253, + BusinessDay, + BusinessHour, + CustomBusinessDay, + DateOffset, + Day, + Easter, + Hour, + LastWeekOfMonth, + Minute, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + SemiMonthBegin, + SemiMonthEnd, + Week, + WeekOfMonth, + YearBegin, + YearEnd, +) + + +def _create_sp_series(): + nan = np.nan + + # nan-based + arr = np.arange(15, dtype=np.float64) + arr[7:12] = nan + arr[-1:] = nan + + bseries = Series(SparseArray(arr, kind="block")) + bseries.name = "bseries" + return bseries + + +def _create_sp_tsseries(): + nan = np.nan + + # nan-based + arr = np.arange(15, dtype=np.float64) + arr[7:12] = nan + arr[-1:] = nan + + date_index = bdate_range("1/1/2011", periods=len(arr)) + bseries = Series(SparseArray(arr, kind="block"), index=date_index) + bseries.name = "btsseries" + return bseries + + +def _create_sp_frame(): + nan = np.nan + + data = { + "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], + "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], + "C": np.arange(10).astype(np.int64), + "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan], + } + + dates = bdate_range("1/1/2011", periods=10) + return DataFrame(data, index=dates).apply(SparseArray) + + +def create_pickle_data(): + """create the pickle data""" + data = { + "A": [0.0, 1.0, 2.0, 3.0, np.nan], + "B": [0, 1, 0, 1, 0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": date_range("1/1/2009", periods=5), + "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0], + } + + scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")} + + index = { + "int": Index(np.arange(10)), + "date": date_range("20130101", periods=10), + "period": period_range("2013-01-01", freq="M", periods=10), + "float": Index(np.arange(10, dtype=np.float64)), + "uint": Index(np.arange(10, dtype=np.uint64)), + "timedelta": timedelta_range("00:00:00", freq="30min", periods=10), + } + + index["range"] = RangeIndex(10) + + index["interval"] = interval_range(0, periods=10) + + mi = { + "reg2": 
MultiIndex.from_tuples( + tuple( + zip( + *[ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + ) + ), + names=["first", "second"], + ) + } + + series = { + "float": Series(data["A"]), + "int": Series(data["B"]), + "mixed": Series(data["E"]), + "ts": Series( + np.arange(10).astype(np.int64), index=date_range("20130101", periods=10) + ), + "mi": Series( + np.arange(5).astype(np.float64), + index=MultiIndex.from_tuples( + tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"] + ), + ), + "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]), + "cat": Series(Categorical(["foo", "bar", "baz"])), + "dt": Series(date_range("20130101", periods=5)), + "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")), + "period": Series([Period("2000Q1")] * 5), + } + + mixed_dup_df = DataFrame(data) + mixed_dup_df.columns = list("ABCDA") + frame = { + "float": DataFrame({"A": series["float"], "B": series["float"] + 1}), + "int": DataFrame({"A": series["int"], "B": series["int"] + 1}), + "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}), + "mi": DataFrame( + {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)}, + index=MultiIndex.from_tuples( + tuple( + zip( + *[ + ["bar", "bar", "baz", "baz", "baz"], + ["one", "two", "one", "two", "three"], + ] + ) + ), + names=["first", "second"], + ), + ), + "dup": DataFrame( + np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"] + ), + "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}), + "cat_and_float": DataFrame( + { + "A": Categorical(["foo", "bar", "baz"]), + "B": np.arange(3).astype(np.int64), + } + ), + "mixed_dup": mixed_dup_df, + "dt_mixed_tzs": DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + }, + index=range(5), + ), + "dt_mixed2_tzs": DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + "C": Timestamp("20130603", tz="UTC"), + }, + index=range(5), + ), + } + + cat = { + "int8": Categorical(list("abcdefg")), + "int16": Categorical(np.arange(1000)), + "int32": Categorical(np.arange(10000)), + } + + timestamp = { + "normal": Timestamp("2011-01-01"), + "nat": NaT, + "tz": Timestamp("2011-01-01", tz="US/Eastern"), + } + + off = { + "DateOffset": DateOffset(years=1), + "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824), + "BusinessDay": BusinessDay(offset=timedelta(seconds=9)), + "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"), + "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"), + "SemiMonthBegin": SemiMonthBegin(day_of_month=9), + "SemiMonthEnd": SemiMonthEnd(day_of_month=24), + "MonthBegin": MonthBegin(1), + "MonthEnd": MonthEnd(1), + "QuarterBegin": QuarterBegin(1), + "QuarterEnd": QuarterEnd(1), + "Day": Day(1), + "YearBegin": YearBegin(1), + "YearEnd": YearEnd(1), + "Week": Week(1), + "Week_Tues": Week(2, normalize=False, weekday=1), + "WeekOfMonth": WeekOfMonth(week=3, weekday=4), + "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3), + "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"), + "Easter": Easter(), + "Hour": Hour(1), + "Minute": Minute(1), + } + + return { + "series": series, + "frame": frame, + "index": index, + "scalars": scalars, + "mi": mi, + "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()}, + "sp_frame": {"float": _create_sp_frame()}, + "cat": cat, + "timestamp": timestamp, + 
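# offsets are included so that pickle-compat breakage in DateOffset + # subclasses shows up when older files are read back +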
"offsets": off, + } + + +def platform_name(): + return "_".join( + [ + str(pandas.__version__), + str(pl.machine()), + str(pl.system().lower()), + str(pl.python_version()), + ] + ) + + +def write_legacy_pickles(output_dir): + version = pandas.__version__ + + print( + "This script generates a storage file for the current arch, system, " + "and python version" + ) + print(f" pandas version: {version}") + print(f" output dir : {output_dir}") + print(" storage format: pickle") + + pth = f"{platform_name()}.pickle" + + with open(os.path.join(output_dir, pth), "wb") as fh: + pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL) + + print(f"created pickle file: {pth}") + + +def write_legacy_file(): + # force our cwd to be the first searched + sys.path.insert(0, "") + + if not 3 <= len(sys.argv) <= 4: + sys.exit( + "Specify output directory and storage type: generate_legacy_" + "storage_files.py " + ) + + output_dir = str(sys.argv[1]) + storage_type = str(sys.argv[2]) + + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + if storage_type == "pickle": + write_legacy_pickles(output_dir=output_dir) + else: + sys.exit("storage_type must be one of {'pickle'}") + + +if __name__ == "__main__": + write_legacy_file() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0208fcc74ec83f782e1fedf5e89b40fca3ed69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py @@ -0,0 +1,423 @@ +from textwrap import dedent + +import numpy as np +import pytest + +from pandas.errors import ( + PyperclipException, + PyperclipWindowsException, +) + +import pandas as pd +from pandas import ( + NA, + DataFrame, + Series, + get_option, + read_clipboard, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.clipboard import ( + CheckedCall, + _stringifyText, + init_qt_clipboard, +) + + +def build_kwargs(sep, excel): + kwargs = {} + if excel != "default": + kwargs["excel"] = excel + if sep != "default": + kwargs["sep"] = sep + return kwargs + + +@pytest.fixture( + params=[ + "delims", + "utf8", + "utf16", + "string", + "long", + "nonascii", + "colwidth", + "mixed", + "float", + "int", + ] +) +def df(request): + data_type = request.param + + if data_type == "delims": + return DataFrame({"a": ['"a,\t"b|c', "d\tef`"], "b": ["hi'j", "k''lm"]}) + elif data_type == "utf8": + return DataFrame({"a": ["µasd", "Ωœ∑`"], "b": ["øπ∆˚¬", "œ∑`®"]}) + elif data_type == "utf16": + return DataFrame( + {"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]} + ) + elif data_type == "string": + return DataFrame( + np.array([f"i-{i}" for i in range(15)]).reshape(5, 3), columns=list("abc") + ) + elif data_type == "long": + max_rows = get_option("display.max_rows") + return DataFrame( + np.random.default_rng(2).integers(0, 10, size=(max_rows + 1, 3)), + columns=list("abc"), + ) + elif data_type == "nonascii": + return DataFrame({"en": "in English".split(), "es": "en español".split()}) + elif data_type == "colwidth": + _cw = get_option("display.max_colwidth") + 1 + return DataFrame( + np.array(["x" * _cw for _ in range(15)]).reshape(5, 3), columns=list("abc") + ) + elif data_type == "mixed": + return DataFrame( + { + "a": np.arange(1.0, 6.0) + 0.01, + "b": np.arange(1, 6).astype(np.int64), + "c": list("abcde"), + } + ) + elif 
data_type == "float": + return DataFrame(np.random.default_rng(2).random((5, 3)), columns=list("abc")) + elif data_type == "int": + return DataFrame( + np.random.default_rng(2).integers(0, 10, (5, 3)), columns=list("abc") + ) + else: + raise ValueError + + +@pytest.fixture +def mock_ctypes(monkeypatch): + """ + Mocks WinError to help with testing the clipboard. + """ + + def _mock_win_error(): + return "Window Error" + + # Set raising to False because WinError won't exist on non-windows platforms + with monkeypatch.context() as m: + m.setattr("ctypes.WinError", _mock_win_error, raising=False) + yield + + +@pytest.mark.usefixtures("mock_ctypes") +def test_checked_call_with_bad_call(monkeypatch): + """ + Give CheckCall a function that returns a falsey value and + mock get_errno so it returns false so an exception is raised. + """ + + def _return_false(): + return False + + monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: True) + msg = f"Error calling {_return_false.__name__} \\(Window Error\\)" + + with pytest.raises(PyperclipWindowsException, match=msg): + CheckedCall(_return_false)() + + +@pytest.mark.usefixtures("mock_ctypes") +def test_checked_call_with_valid_call(monkeypatch): + """ + Give CheckCall a function that returns a truthy value and + mock get_errno so it returns true so an exception is not raised. + The function should return the results from _return_true. + """ + + def _return_true(): + return True + + monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: False) + + # Give CheckedCall a callable that returns a truthy value s + checked_call = CheckedCall(_return_true) + assert checked_call() is True + + +@pytest.mark.parametrize( + "text", + [ + "String_test", + True, + 1, + 1.0, + 1j, + ], +) +def test_stringify_text(text): + valid_types = (str, int, float, bool) + + if isinstance(text, valid_types): + result = _stringifyText(text) + assert result == str(text) + else: + msg = ( + "only str, int, float, and bool values " + f"can be copied to the clipboard, not {type(text).__name__}" + ) + with pytest.raises(PyperclipException, match=msg): + _stringifyText(text) + + +@pytest.fixture +def set_pyqt_clipboard(monkeypatch): + qt_cut, qt_paste = init_qt_clipboard() + with monkeypatch.context() as m: + m.setattr(pd.io.clipboard, "clipboard_set", qt_cut) + m.setattr(pd.io.clipboard, "clipboard_get", qt_paste) + yield + + +@pytest.fixture +def clipboard(qapp): + clip = qapp.clipboard() + yield clip + clip.clear() + + +@pytest.mark.single_cpu +@pytest.mark.clipboard +@pytest.mark.usefixtures("set_pyqt_clipboard") +@pytest.mark.usefixtures("clipboard") +class TestClipboard: + # Test that default arguments copy as tab delimited + # Test that explicit delimiters are respected + @pytest.mark.parametrize("sep", [None, "\t", ",", "|"]) + @pytest.mark.parametrize("encoding", [None, "UTF-8", "utf-8", "utf8"]) + def test_round_trip_frame_sep(self, df, sep, encoding): + df.to_clipboard(excel=None, sep=sep, encoding=encoding) + result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding) + tm.assert_frame_equal(df, result) + + # Test white space separator + def test_round_trip_frame_string(self, df): + df.to_clipboard(excel=False, sep=None) + result = read_clipboard() + assert df.to_string() == result.to_string() + assert df.shape == result.shape + + # Two character separator is not supported in to_clipboard + # Test that multi-character separators are not silently passed + def test_excel_sep_warning(self, df): + with tm.assert_produces_warning( + UserWarning, + 
match="to_clipboard in excel mode requires a single character separator.", + check_stacklevel=False, + ): + df.to_clipboard(excel=True, sep=r"\t") + + # Separator is ignored when excel=False and should produce a warning + def test_copy_delim_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=False, sep="\t") + + # Tests that the default behavior of to_clipboard is tab + # delimited and excel="True" + @pytest.mark.parametrize("sep", ["\t", None, "default"]) + @pytest.mark.parametrize("excel", [True, None, "default"]) + def test_clipboard_copy_tabs_default(self, sep, excel, df, clipboard): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + assert clipboard.text() == df.to_csv(sep="\t") + + # Tests reading of white space separated tables + @pytest.mark.parametrize("sep", [None, "default"]) + def test_clipboard_copy_strings(self, sep, df): + kwargs = build_kwargs(sep, False) + df.to_clipboard(**kwargs) + result = read_clipboard(sep=r"\s+") + assert result.to_string() == df.to_string() + assert df.shape == result.shape + + def test_read_clipboard_infer_excel(self, clipboard): + # gh-19010: avoid warnings + clip_kwargs = {"engine": "python"} + + text = dedent( + """ + John James\tCharlie Mingus + 1\t2 + 4\tHarry Carney + """.strip() + ) + clipboard.setText(text) + df = read_clipboard(**clip_kwargs) + + # excel data is parsed correctly + assert df.iloc[1, 1] == "Harry Carney" + + # having diff tab counts doesn't trigger it + text = dedent( + """ + a\t b + 1 2 + 3 4 + """.strip() + ) + clipboard.setText(text) + res = read_clipboard(**clip_kwargs) + + text = dedent( + """ + a b + 1 2 + 3 4 + """.strip() + ) + clipboard.setText(text) + exp = read_clipboard(**clip_kwargs) + + tm.assert_frame_equal(res, exp) + + def test_infer_excel_with_nulls(self, clipboard): + # GH41108 + text = "col1\tcol2\n1\tred\n\tblue\n2\tgreen" + + clipboard.setText(text) + df = read_clipboard() + df_expected = DataFrame( + data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]} + ) + + # excel data is parsed correctly + tm.assert_frame_equal(df, df_expected) + + @pytest.mark.parametrize( + "multiindex", + [ + ( # Can't use `dedent` here as it will remove the leading `\t` + "\n".join( + [ + "\t\t\tcol1\tcol2", + "A\t0\tTrue\t1\tred", + "A\t1\tTrue\t\tblue", + "B\t0\tFalse\t2\tgreen", + ] + ), + [["A", "A", "B"], [0, 1, 0], [True, True, False]], + ), + ( + "\n".join( + ["\t\tcol1\tcol2", "A\t0\t1\tred", "A\t1\t\tblue", "B\t0\t2\tgreen"] + ), + [["A", "A", "B"], [0, 1, 0]], + ), + ], + ) + def test_infer_excel_with_multiindex(self, clipboard, multiindex): + # GH41108 + + clipboard.setText(multiindex[0]) + df = read_clipboard() + df_expected = DataFrame( + data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]}, + index=multiindex[1], + ) + + # excel data is parsed correctly + tm.assert_frame_equal(df, df_expected) + + def test_invalid_encoding(self, df): + msg = "clipboard only supports utf-8 encoding" + # test case for testing invalid encoding + with pytest.raises(ValueError, match=msg): + df.to_clipboard(encoding="ascii") + with pytest.raises(NotImplementedError, match=msg): + read_clipboard(encoding="ascii") + + @pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑`...", "abcd..."]) + def test_raw_roundtrip(self, data): + # PR #25040 wide unicode wasn't copied correctly on PY3 on windows + df = DataFrame({"data": [data]}) + df.to_clipboard() + result = read_clipboard() + tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("engine", ["c", "python"]) + def 
test_read_clipboard_dtype_backend( + self, clipboard, string_storage, dtype_backend, engine + ): + # GH#50502 + if string_storage == "pyarrow" or dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + if string_storage == "python": + string_array = StringArray(np.array(["x", "y"], dtype=np.object_)) + string_array_na = StringArray(np.array(["x", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow" and engine != "c": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["x", "y"])) + string_array_na = ArrowExtensionArray(pa.array(["x", None])) + + else: + string_array = ArrowStringArray(pa.array(["x", "y"])) + string_array_na = ArrowStringArray(pa.array(["x", None])) + + text = """a,b,c,d,e,f,g,h,i +x,1,4.0,x,2,4.0,,True,False +y,2,5.0,,,,,False,""" + clipboard.setText(text) + + with pd.option_context("mode.string_storage", string_storage): + result = read_clipboard(sep=",", dtype_backend=dtype_backend, engine=engine) + + expected = DataFrame( + { + "a": string_array, + "b": Series([1, 2], dtype="Int64"), + "c": Series([4.0, 5.0], dtype="Float64"), + "d": string_array_na, + "e": Series([2, NA], dtype="Int64"), + "f": Series([4.0, NA], dtype="Float64"), + "g": Series([NA, NA], dtype="Int64"), + "h": Series([True, False], dtype="boolean"), + "i": Series([False, NA], dtype="boolean"), + } + ) + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + expected["g"] = ArrowExtensionArray(pa.array([None, None])) + + tm.assert_frame_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_clipboard(dtype_backend="numpy") + + def test_to_clipboard_pos_args_deprecation(self): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_clipboard " + r"will be keyword-only." 
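+ # `match` is applied as a regular expression by assert_produces_warning, + # hence the raw strings above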
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_clipboard(True, None) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_common.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..074033868635abf8d62702e4d73f64d4fb742222 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_common.py @@ -0,0 +1,650 @@ +""" +Tests for the pandas.io.common functionalities +""" +import codecs +import errno +from functools import partial +from io import ( + BytesIO, + StringIO, + UnsupportedOperation, +) +import mmap +import os +from pathlib import Path +import pickle +import tempfile + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + +import pandas.io.common as icom + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +class CustomFSPath: + """For testing fspath on unknown objects""" + + def __init__(self, path) -> None: + self.path = path + + def __fspath__(self): + return self.path + + +# Functions that consume a string path and return a string or path-like object +path_types = [str, CustomFSPath, Path] + +try: + from py.path import local as LocalPath + + path_types.append(LocalPath) +except ImportError: + pass + +HERE = os.path.abspath(os.path.dirname(__file__)) + + +# https://github.com/cython/cython/issues/1720 +class TestCommonIOCapabilities: + data1 = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + + def test_expand_user(self): + filename = "~/sometest" + expanded_name = icom._expand_user(filename) + + assert expanded_name != filename + assert os.path.isabs(expanded_name) + assert os.path.expanduser(filename) == expanded_name + + def test_expand_user_normal_path(self): + filename = "/somefolder/sometest" + expanded_name = icom._expand_user(filename) + + assert expanded_name == filename + assert os.path.expanduser(filename) == expanded_name + + def test_stringify_path_pathlib(self): + rel_path = icom.stringify_path(Path(".")) + assert rel_path == "." 
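+ # Path itself collapses the redundant "//" in the next check; + # stringify_path just converts the path-like object back to a plain str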
+ redundant_path = icom.stringify_path(Path("foo//bar")) + assert redundant_path == os.path.join("foo", "bar") + + @td.skip_if_no("py.path") + def test_stringify_path_localpath(self): + path = os.path.join("foo", "bar") + abs_path = os.path.abspath(path) + lpath = LocalPath(path) + assert icom.stringify_path(lpath) == abs_path + + def test_stringify_path_fspath(self): + p = CustomFSPath("foo/bar.csv") + result = icom.stringify_path(p) + assert result == "foo/bar.csv" + + def test_stringify_file_and_path_like(self): + # GH 38125: do not stringify file objects that are also path-like + fsspec = pytest.importorskip("fsspec") + with tm.ensure_clean() as path: + with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj: + assert fsspec_obj == icom.stringify_path(fsspec_obj) + + @pytest.mark.parametrize("path_type", path_types) + def test_infer_compression_from_path(self, compression_format, path_type): + extension, expected = compression_format + path = path_type("foo/bar.csv" + extension) + compression = icom.infer_compression(path, compression="infer") + assert compression == expected + + @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path]) + def test_get_handle_with_path(self, path_type): + # ignore LocalPath: it creates strange paths: /absolute/~/sometest + with tempfile.TemporaryDirectory(dir=Path.home()) as tmp: + filename = path_type("~/" + Path(tmp).name + "/sometest") + with icom.get_handle(filename, "w") as handles: + assert Path(handles.handle.name).is_absolute() + assert os.path.expanduser(filename) == handles.handle.name + + def test_get_handle_with_buffer(self): + with StringIO() as input_buffer: + with icom.get_handle(input_buffer, "r") as handles: + assert handles.handle == input_buffer + assert not input_buffer.closed + assert input_buffer.closed + + # Test that BytesIOWrapper(get_handle) returns the correct number of bytes every time + def test_bytesiowrapper_returns_correct_bytes(self): + # Test latin1, ucs-2, and ucs-4 chars + data = """a,b,c +1,2,3 +©,®,® +Look,a snake,🐍""" + with icom.get_handle(StringIO(data), "rb", is_text=False) as handles: + result = b"" + chunksize = 5 + while True: + chunk = handles.handle.read(chunksize) + # Make sure each chunk is the correct number of bytes + assert len(chunk) <= chunksize + if len(chunk) < chunksize: + # Can be fewer bytes, but only at EOF, + # which happens when read returns empty + assert len(handles.handle.read()) == 0 + result += chunk + break + result += chunk + assert result == data.encode("utf-8") + + # Test that pyarrow can handle a file opened with get_handle + def test_get_handle_pyarrow_compat(self): + pa_csv = pytest.importorskip("pyarrow.csv") + + # Test latin1, ucs-2, and ucs-4 chars + data = """a,b,c +1,2,3 +©,®,® +Look,a snake,🐍""" + expected = pd.DataFrame( + {"a": ["1", "©", "Look"], "b": ["2", "®", "a snake"], "c": ["3", "®", "🐍"]} + ) + s = StringIO(data) + with icom.get_handle(s, "rb", is_text=False) as handles: + df = pa_csv.read_csv(handles.handle).to_pandas() + tm.assert_frame_equal(df, expected) + assert not s.closed + + def test_iterator(self): + with pd.read_csv(StringIO(self.data1), chunksize=1) as reader: + result = pd.concat(reader, ignore_index=True) + expected = pd.read_csv(StringIO(self.data1)) + tm.assert_frame_equal(result, expected) + + # GH12153 + with pd.read_csv(StringIO(self.data1), chunksize=1) as it: + first = next(it) + tm.assert_frame_equal(first, expected.iloc[[0]]) + tm.assert_frame_equal(pd.concat(it), expected.iloc[1:]) + + @pytest.mark.parametrize( + "reader, module, 
error_class, fn_ext", + [ + (pd.read_csv, "os", FileNotFoundError, "csv"), + (pd.read_fwf, "os", FileNotFoundError, "txt"), + (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), + (pd.read_feather, "pyarrow", OSError, "feather"), + (pd.read_hdf, "tables", FileNotFoundError, "h5"), + (pd.read_stata, "os", FileNotFoundError, "dta"), + (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), + (pd.read_json, "os", FileNotFoundError, "json"), + (pd.read_pickle, "os", FileNotFoundError, "pickle"), + ], + ) + def test_read_non_existent(self, reader, module, error_class, fn_ext): + pytest.importorskip(module) + + path = os.path.join(HERE, "data", "does_not_exist." + fn_ext) + msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist" + msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" + msg3 = "Expected object or value" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = ( + rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: " + rf"'.+does_not_exist\.{fn_ext}'" + ) + msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'" + msg7 = ( + rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'" + ) + msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}" + + with pytest.raises( + error_class, + match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})", + ): + reader(path) + + @pytest.mark.parametrize( + "method, module, error_class, fn_ext", + [ + (pd.DataFrame.to_csv, "os", OSError, "csv"), + (pd.DataFrame.to_html, "os", OSError, "html"), + (pd.DataFrame.to_excel, "xlrd", OSError, "xlsx"), + (pd.DataFrame.to_feather, "pyarrow", OSError, "feather"), + (pd.DataFrame.to_parquet, "pyarrow", OSError, "parquet"), + (pd.DataFrame.to_stata, "os", OSError, "dta"), + (pd.DataFrame.to_json, "os", OSError, "json"), + (pd.DataFrame.to_pickle, "os", OSError, "pickle"), + ], + ) + # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables + def test_write_missing_parent_directory(self, method, module, error_class, fn_ext): + pytest.importorskip(module) + + dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]}) + + path = os.path.join(HERE, "data", "missing_folder", "does_not_exist." + fn_ext) + + with pytest.raises( + error_class, + match=r"Cannot save file into a non-existent directory: .*missing_folder", + ): + method(dummy_frame, path) + + @pytest.mark.parametrize( + "reader, module, error_class, fn_ext", + [ + (pd.read_csv, "os", FileNotFoundError, "csv"), + (pd.read_table, "os", FileNotFoundError, "csv"), + (pd.read_fwf, "os", FileNotFoundError, "txt"), + (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"), + (pd.read_feather, "pyarrow", OSError, "feather"), + (pd.read_hdf, "tables", FileNotFoundError, "h5"), + (pd.read_stata, "os", FileNotFoundError, "dta"), + (pd.read_sas, "os", FileNotFoundError, "sas7bdat"), + (pd.read_json, "os", FileNotFoundError, "json"), + (pd.read_pickle, "os", FileNotFoundError, "pickle"), + ], + ) + def test_read_expands_user_home_dir( + self, reader, module, error_class, fn_ext, monkeypatch + ): + pytest.importorskip(module) + + path = os.path.join("~", "does_not_exist." + fn_ext) + monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x)) + + msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? 
does not exist" + msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'" + msg3 = "Unexpected character found when decoding 'false'" + msg4 = "path_or_buf needs to be a string file path or file-like" + msg5 = ( + rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: " + rf"'.+does_not_exist\.{fn_ext}'" + ) + msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'" + msg7 = ( + rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'" + ) + msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}" + + with pytest.raises( + error_class, + match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})", + ): + reader(path) + + @pytest.mark.parametrize( + "reader, module, path", + [ + (pd.read_csv, "os", ("io", "data", "csv", "iris.csv")), + (pd.read_table, "os", ("io", "data", "csv", "iris.csv")), + ( + pd.read_fwf, + "os", + ("io", "data", "fixed_width", "fixed_width_format.txt"), + ), + (pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")), + ( + pd.read_feather, + "pyarrow", + ("io", "data", "feather", "feather-0_3_1.feather"), + ), + ( + pd.read_hdf, + "tables", + ("io", "data", "legacy_hdf", "datetimetz_object.h5"), + ), + (pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")), + (pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")), + (pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")), + ( + pd.read_pickle, + "os", + ("io", "data", "pickle", "categorical.0.25.0.pickle"), + ), + ], + ) + def test_read_fspath_all(self, reader, module, path, datapath): + pytest.importorskip(module) + path = datapath(*path) + + mypath = CustomFSPath(path) + result = reader(mypath) + expected = reader(path) + + if path.endswith(".pickle"): + # categorical + tm.assert_categorical_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "writer_name, writer_kwargs, module", + [ + ("to_csv", {}, "os"), + ("to_excel", {"engine": "openpyxl"}, "openpyxl"), + ("to_feather", {}, "pyarrow"), + ("to_html", {}, "os"), + ("to_json", {}, "os"), + ("to_latex", {}, "os"), + ("to_pickle", {}, "os"), + ("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"), + ], + ) + def test_write_fspath_all(self, writer_name, writer_kwargs, module): + if writer_name in ["to_latex"]: # uses Styler implementation + pytest.importorskip("jinja2") + p1 = tm.ensure_clean("string") + p2 = tm.ensure_clean("fspath") + df = pd.DataFrame({"A": [1, 2]}) + + with p1 as string, p2 as fspath: + pytest.importorskip(module) + mypath = CustomFSPath(fspath) + writer = getattr(df, writer_name) + + writer(string, **writer_kwargs) + writer(mypath, **writer_kwargs) + with open(string, "rb") as f_str, open(fspath, "rb") as f_path: + if writer_name == "to_excel": + # binary representation of excel contains time creation + # data that causes flaky CI failures + result = pd.read_excel(f_str, **writer_kwargs) + expected = pd.read_excel(f_path, **writer_kwargs) + tm.assert_frame_equal(result, expected) + else: + result = f_str.read() + expected = f_path.read() + assert result == expected + + def test_write_fspath_hdf5(self): + # Same test as write_fspath_all, except HDF5 files aren't + # necessarily byte-for-byte identical for a given dataframe, so we'll + # have to read and compare equality + pytest.importorskip("tables") + + df = pd.DataFrame({"A": [1, 2]}) + p1 = tm.ensure_clean("string") + p2 = tm.ensure_clean("fspath") + + with p1 as string, p2 as fspath: + mypath = CustomFSPath(fspath) + 
df.to_hdf(mypath, key="bar") + df.to_hdf(string, key="bar") + + result = pd.read_hdf(fspath, key="bar") + expected = pd.read_hdf(string, key="bar") + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def mmap_file(datapath): + return datapath("io", "data", "csv", "test_mmap.csv") + + +class TestMMapWrapper: + def test_constructor_bad_file(self, mmap_file): + non_file = StringIO("I am not a file") + non_file.fileno = lambda: -1 + + # the error raised is different on Windows + if is_platform_windows(): + msg = "The parameter is incorrect" + err = OSError + else: + msg = "[Errno 22]" + err = mmap.error + + with pytest.raises(err, match=msg): + icom._maybe_memory_map(non_file, True) + + with open(mmap_file, encoding="utf-8") as target: + pass + + msg = "I/O operation on closed file" + with pytest.raises(ValueError, match=msg): + icom._maybe_memory_map(target, True) + + def test_next(self, mmap_file): + with open(mmap_file, encoding="utf-8") as target: + lines = target.readlines() + + with icom.get_handle( + target, "r", is_text=True, memory_map=True + ) as wrappers: + wrapper = wrappers.handle + assert isinstance(wrapper.buffer.buffer, mmap.mmap) + + for line in lines: + next_line = next(wrapper) + assert next_line.strip() == line.strip() + + with pytest.raises(StopIteration, match=r"^$"): + next(wrapper) + + def test_unknown_engine(self): + with tm.ensure_clean() as path: + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_csv(path) + with pytest.raises(ValueError, match="Unknown engine"): + pd.read_csv(path, engine="pyt") + + def test_binary_mode(self): + """ + 'encoding' shouldn't be passed to 'open' in binary mode. + + GH 35058 + """ + with tm.ensure_clean() as path: + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_csv(path, mode="w+b") + tm.assert_frame_equal(df, pd.read_csv(path, index_col=0)) + + @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"]) + @pytest.mark.parametrize("compression_", ["bz2", "xz"]) + def test_warning_missing_utf_bom(self, encoding, compression_): + """ + bz2 and xz do not write the byte order mark (BOM) for utf-16/32. 
+ + https://stackoverflow.com/questions/55171439 + + GH 35681 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning(UnicodeWarning): + df.to_csv(path, compression=compression_, encoding=encoding) + + # reading should fail (otherwise we wouldn't need the warning) + msg = r"UTF-\d+ stream does not start with BOM" + with pytest.raises(UnicodeError, match=msg): + pd.read_csv(path, compression=compression_, encoding=encoding) + + +def test_is_fsspec_url(): + assert icom.is_fsspec_url("gcs://pandas/somethingelse.com") + assert icom.is_fsspec_url("gs://pandas/somethingelse.com") + # the following is the only remote URL that is handled without fsspec + assert not icom.is_fsspec_url("http://pandas/somethingelse.com") + assert not icom.is_fsspec_url("random:pandas/somethingelse.com") + assert not icom.is_fsspec_url("/local/path") + assert not icom.is_fsspec_url("relative/local/path") + # fsspec URL in string should not be recognized + assert not icom.is_fsspec_url("this is not fsspec://url") + assert not icom.is_fsspec_url("{'url': 'gs://pandas/somethingelse.com'}") + # accept everything that conforms to RFC 3986 schema + assert icom.is_fsspec_url("RFC-3986+compliant.spec://something") + + +@pytest.mark.parametrize("encoding", [None, "utf-8"]) +@pytest.mark.parametrize("format", ["csv", "json"]) +def test_codecs_encoding(encoding, format): + # GH39247 + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with codecs.open(path, mode="w", encoding=encoding) as handle: + getattr(expected, f"to_{format}")(handle) + with codecs.open(path, mode="r", encoding=encoding) as handle: + if format == "csv": + df = pd.read_csv(handle, index_col=0) + else: + df = pd.read_json(handle) + tm.assert_frame_equal(expected, df) + + +def test_codecs_get_writer_reader(): + # GH39247 + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with tm.ensure_clean() as path: + with open(path, "wb") as handle: + with codecs.getwriter("utf-8")(handle) as encoded: + expected.to_csv(encoded) + with open(path, "rb") as handle: + with codecs.getreader("utf-8")(handle) as encoded: + df = pd.read_csv(encoded, index_col=0) + tm.assert_frame_equal(expected, df) + + +@pytest.mark.parametrize( + "io_class,mode,msg", + [ + (BytesIO, "t", "a bytes-like object is required, not 'str'"), + (StringIO, "b", "string argument expected, got 'bytes'"), + ], +) +def test_explicit_encoding(io_class, mode, msg): + # GH39247; this test makes sure that if a user provides mode="*t" or "*b", + # it is used. 
In this test it leads to an error because the + # wrong mode is intentionally requested + expected = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with io_class() as buffer: + with pytest.raises(TypeError, match=msg): + expected.to_csv(buffer, mode=f"w{mode}") + + +@pytest.mark.parametrize("encoding_errors", [None, "strict", "replace"]) +@pytest.mark.parametrize("format", ["csv", "json"]) +def test_encoding_errors(encoding_errors, format): + # GH39450 + msg = "'utf-8' codec can't decode byte" + bad_encoding = b"\xe4" + + if format == "csv": + content = b"," + bad_encoding + b"\n" + bad_encoding * 2 + b"," + bad_encoding + reader = partial(pd.read_csv, index_col=0) + else: + content = ( + b'{"' + + bad_encoding * 2 + + b'": {"' + + bad_encoding + + b'":"' + + bad_encoding + + b'"}}' + ) + reader = partial(pd.read_json, orient="index") + with tm.ensure_clean() as path: + file = Path(path) + file.write_bytes(content) + + if encoding_errors != "replace": + with pytest.raises(UnicodeDecodeError, match=msg): + reader(path, encoding_errors=encoding_errors) + else: + df = reader(path, encoding_errors=encoding_errors) + decoded = bad_encoding.decode(errors=encoding_errors) + expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2]) + tm.assert_frame_equal(df, expected) + + +def test_bad_encoding_errors(): + # GH 39777 + with tm.ensure_clean() as path: + with pytest.raises(LookupError, match="unknown error handler name"): + icom.get_handle(path, "w", errors="bad") + + +def test_errno_attribute(): + # GH 13872 + with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err: + pd.read_csv("doesnt_exist") + assert err.errno == errno.ENOENT + + +def test_fail_mmap(): + with pytest.raises(UnsupportedOperation, match="fileno"): + with BytesIO() as buffer: + icom.get_handle(buffer, "rb", memory_map=True) + + +def test_close_on_error(): + # GH 47136 + class TestError: + def close(self): + raise OSError("test") + + with pytest.raises(OSError, match="test"): + with BytesIO() as buffer: + with icom.get_handle(buffer, "rb") as handles: + handles.created_handles.append(TestError()) + + +@pytest.mark.parametrize( + "reader", + [ + pd.read_csv, + pd.read_fwf, + pd.read_excel, + pd.read_feather, + pd.read_hdf, + pd.read_stata, + pd.read_sas, + pd.read_json, + pd.read_pickle, + ], +) +def test_pickle_reader(reader): + # GH 22265 + with BytesIO() as buffer: + pickle.dump(reader, buffer) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_compression.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..3a58dda9e8dc47f2072e0175c120036523ed83f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_compression.py @@ -0,0 +1,378 @@ +import gzip +import io +import os +from pathlib import Path +import subprocess +import sys +import tarfile +import textwrap +import time +import zipfile + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +import pandas.io.common as icom + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", 
["to_pickle", "to_json", "to_csv"]) +def test_compression_size(obj, method, compression_only): + if compression_only == "tar": + compression_only = {"method": "tar", "mode": "w:gz"} + + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_csv", "to_json"]) +def test_compression_size_fh(obj, method, compression_only): + with tm.ensure_clean() as path: + with icom.get_handle( + path, + "w:gz" if compression_only == "tar" else "w", + compression=compression_only, + ) as handles: + getattr(obj, method)(handles.handle) + assert not handles.handle.closed + compressed_size = os.path.getsize(path) + with tm.ensure_clean() as path: + with icom.get_handle(path, "w", compression=None) as handles: + getattr(obj, method)(handles.handle) + assert not handles.handle.closed + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize( + "write_method, write_kwargs, read_method", + [ + ("to_csv", {"index": False}, pd.read_csv), + ("to_json", {}, pd.read_json), + ("to_pickle", {}, pd.read_pickle), + ], +) +def test_dataframe_compression_defaults_to_infer( + write_method, write_kwargs, read_method, compression_only, compression_to_extension +): + # GH22004 + input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"]) + extension = compression_to_extension[compression_only] + with tm.ensure_clean("compressed" + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only) + tm.assert_frame_equal(output, input) + + +@pytest.mark.parametrize( + "write_method,write_kwargs,read_method,read_kwargs", + [ + ("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}), + ("to_json", {}, pd.read_json, {"typ": "series"}), + ("to_pickle", {}, pd.read_pickle, {}), + ], +) +def test_series_compression_defaults_to_infer( + write_method, + write_kwargs, + read_method, + read_kwargs, + compression_only, + compression_to_extension, +): + # GH22004 + input = pd.Series([0, 5, -2, 10], name="X") + extension = compression_to_extension[compression_only] + with tm.ensure_clean("compressed" + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + if "squeeze" in read_kwargs: + kwargs = read_kwargs.copy() + del kwargs["squeeze"] + output = read_method(path, compression=compression_only, **kwargs).squeeze( + "columns" + ) + else: + output = read_method(path, compression=compression_only, **read_kwargs) + tm.assert_series_equal(output, input, check_names=False) + + +def test_compression_warning(compression_only): + # Assert that passing a file object to to_csv while explicitly specifying a + # compression protocol triggers a RuntimeWarning, as per GH21227. 
+ df = pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ) + with tm.ensure_clean() as path: + with icom.get_handle(path, "w", compression=compression_only) as handles: + with tm.assert_produces_warning(RuntimeWarning): + df.to_csv(handles.handle, compression=compression_only) + + +def test_compression_binary(compression_only): + """ + Binary file handles support compression. + + GH22555 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # with a file + with tm.ensure_clean() as path: + with open(path, mode="wb") as file: + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(path, index_col=0, compression=compression_only) + ) + + # with BytesIO + file = io.BytesIO() + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(file, index_col=0, compression=compression_only) + ) + + +def test_gzip_reproducibility_file_name(): + """ + Gzip should create reproducible archives with mtime. + + Note: Archives created with different filenames will still be different! + + GH 28103 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + compression_options = {"method": "gzip", "mtime": 1} + + # test for filename + with tm.ensure_clean() as path: + path = Path(path) + df.to_csv(path, compression=compression_options) + time.sleep(0.1) + output = path.read_bytes() + df.to_csv(path, compression=compression_options) + assert output == path.read_bytes() + + +def test_gzip_reproducibility_file_object(): + """ + Gzip should create reproducible archives with mtime. + + GH 28103 + """ + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + compression_options = {"method": "gzip", "mtime": 1} + + # test for file object + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + output = buffer.getvalue() + time.sleep(0.1) + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + assert output == buffer.getvalue() + + +@pytest.mark.single_cpu +def test_with_missing_lzma(): + """Tests if import pandas works when lzma is not present.""" + # https://github.com/pandas-dev/pandas/issues/27575 + code = textwrap.dedent( + """\ + import sys + sys.modules['lzma'] = None + import pandas + """ + ) + subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE) + + +@pytest.mark.single_cpu +def test_with_missing_lzma_runtime(): + """Tests if RuntimeError is hit when calling lzma without + having the module available. 
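+ The import is poisoned in a fresh subprocess, so the real lzma module + in the running test process is left untouched.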
+ """ + code = textwrap.dedent( + """ + import sys + import pytest + sys.modules['lzma'] = None + import pandas as pd + df = pd.DataFrame() + with pytest.raises(RuntimeError, match='lzma module'): + df.to_csv('foo.csv', compression='xz') + """ + ) + subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE) + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_gzip_compression_level(obj, method): + # GH33196 + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression="gzip") + compressed_size_default = os.path.getsize(path) + getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1}) + compressed_size_fast = os.path.getsize(path) + assert compressed_size_default < compressed_size_fast + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_xz_compression_level_read(obj, method): + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression="xz") + compressed_size_default = os.path.getsize(path) + getattr(obj, method)(path, compression={"method": "xz", "preset": 1}) + compressed_size_fast = os.path.getsize(path) + assert compressed_size_default < compressed_size_fast + if method == "to_csv": + pd.read_csv(path, compression="xz") + + +@pytest.mark.parametrize( + "obj", + [ + pd.DataFrame( + 100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + columns=["X", "Y", "Z"], + ), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"), + ], +) +@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"]) +def test_bzip_compression_level(obj, method): + """GH33196 bzip needs file size > 100k to show a size difference between + compression levels, so here we just check if the call works when + compression is passed as a dict. 
+ """ + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1}) + + +@pytest.mark.parametrize( + "suffix,archive", + [ + (".zip", zipfile.ZipFile), + (".tar", tarfile.TarFile), + ], +) +def test_empty_archive_zip(suffix, archive): + with tm.ensure_clean(filename=suffix) as path: + with archive(path, "w"): + pass + with pytest.raises(ValueError, match="Zero files found"): + pd.read_csv(path) + + +def test_ambiguous_archive_zip(): + with tm.ensure_clean(filename=".zip") as path: + with zipfile.ZipFile(path, "w") as file: + file.writestr("a.csv", "foo,bar") + file.writestr("b.csv", "foo,bar") + with pytest.raises(ValueError, match="Multiple files found in ZIP file"): + pd.read_csv(path) + + +def test_ambiguous_archive_tar(tmp_path): + csvAPath = tmp_path / "a.csv" + with open(csvAPath, "w", encoding="utf-8") as a: + a.write("foo,bar\n") + csvBPath = tmp_path / "b.csv" + with open(csvBPath, "w", encoding="utf-8") as b: + b.write("foo,bar\n") + + tarpath = tmp_path / "archive.tar" + with tarfile.TarFile(tarpath, "w") as tar: + tar.add(csvAPath, "a.csv") + tar.add(csvBPath, "b.csv") + + with pytest.raises(ValueError, match="Multiple files found in TAR archive"): + pd.read_csv(tarpath) + + +def test_tar_gz_to_different_filename(): + with tm.ensure_clean(filename=".foo") as file: + pd.DataFrame( + [["1", "2"]], + columns=["foo", "bar"], + ).to_csv(file, compression={"method": "tar", "mode": "w:gz"}, index=False) + with gzip.open(file) as uncompressed: + with tarfile.TarFile(fileobj=uncompressed) as archive: + members = archive.getmembers() + assert len(members) == 1 + content = archive.extractfile(members[0]).read().decode("utf8") + + if is_platform_windows(): + expected = "foo,bar\r\n1,2\r\n" + else: + expected = "foo,bar\n1,2\n" + + assert content == expected + + +def test_tar_no_error_on_close(): + with io.BytesIO() as buffer: + with icom._BytesTarFile(fileobj=buffer, mode="w"): + pass diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_feather.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_feather.py new file mode 100644 index 0000000000000000000000000000000000000000..22a7d3b83a459a5dc48ee5d56c2f70130d644be4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_feather.py @@ -0,0 +1,252 @@ +""" test feather-format compat """ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.feather_format import read_feather, to_feather # isort:skip + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +pa = pytest.importorskip("pyarrow") + + +@pytest.mark.single_cpu +class TestFeather: + def check_error_on_write(self, df, exc, err_msg): + # check that we are raising the exception + # on writing + + with pytest.raises(exc, match=err_msg): + with tm.ensure_clean() as path: + to_feather(df, path) + + def check_external_error_on_write(self, df): + # check that we are raising the exception + # on writing + + with tm.external_error_raised(Exception): + with tm.ensure_clean() as path: + to_feather(df, path) + + def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs): + if expected is None: + expected = df.copy() + + with tm.ensure_clean() as path: + to_feather(df, path, **write_kwargs) + + result = read_feather(path, **read_kwargs) + + tm.assert_frame_equal(result, expected) + + def 
test_error(self): + msg = "feather only support IO with DataFrames" + for obj in [ + pd.Series([1, 2, 3]), + 1, + "foo", + pd.Timestamp("20130101"), + np.array([1, 2, 3]), + ]: + self.check_error_on_write(obj, ValueError, msg) + + def test_basic(self): + df = pd.DataFrame( + { + "string": list("abc"), + "int": list(range(1, 4)), + "uint": np.arange(3, 6).astype("u1"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_null": [1.0, np.nan, 3], + "bool": [True, False, True], + "bool_with_null": [True, np.nan, False], + "cat": pd.Categorical(list("abc")), + "dt": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3)), freq=None + ), + "dttz": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3, tz="US/Eastern")), + freq=None, + ), + "dt_with_null": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + "dtns": pd.DatetimeIndex( + list(pd.date_range("20130101", periods=3, freq="ns")), freq=None + ), + } + ) + df["periods"] = pd.period_range("2013", freq="M", periods=3) + df["timedeltas"] = pd.timedelta_range("1 day", periods=3) + df["intervals"] = pd.interval_range(0, 3, 3) + + assert df.dttz.dtype.tz.zone == "US/Eastern" + + expected = df.copy() + expected.loc[1, "bool_with_null"] = None + self.check_round_trip(df, expected=expected) + + def test_duplicate_columns(self): + # https://github.com/wesm/feather/issues/53 + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + self.check_external_error_on_write(df) + + def test_read_columns(self): + # GH 24025 + df = pd.DataFrame( + { + "col1": list("abc"), + "col2": list(range(1, 4)), + "col3": list("xyz"), + "col4": list(range(4, 7)), + } + ) + columns = ["col1", "col3"] + self.check_round_trip(df, expected=df[columns], columns=columns) + + def test_read_columns_different_order(self): + # GH 33878 + df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]}) + expected = df[["B", "A"]] + self.check_round_trip(df, expected, columns=["B", "A"]) + + def test_unsupported_other(self): + # mixed python objects + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + self.check_external_error_on_write(df) + + def test_rw_use_threads(self): + df = pd.DataFrame({"A": np.arange(100000)}) + self.check_round_trip(df, use_threads=True) + self.check_round_trip(df, use_threads=False) + + def test_path_pathlib(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + result = tm.round_trip_pathlib(df.to_feather, read_feather) + tm.assert_frame_equal(df, result) + + def test_path_localpath(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + result = tm.round_trip_localpath(df.to_feather, read_feather) + tm.assert_frame_equal(df, result) + + def test_passthrough_keywords(self): + df = pd.DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ).reset_index() + self.check_round_trip(df, write_kwargs={"version": 1}) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_http_path(self, feather_file, httpserver): + # GH 29055 + expected = read_feather(feather_file) + with open(feather_file, "rb") as f: + httpserver.serve_content(content=f.read()) + 
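# read_feather accepts URLs, so this exercises the URL-based IO path + # against the locally served bytes +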
res = read_feather(httpserver.url) + tm.assert_frame_equal(expected, res) + + def test_read_feather_dtype_backend(self, string_storage, dtype_backend): + # GH#50765 + df = pd.DataFrame( + { + "a": pd.Series([1, np.nan, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="Int64"), + "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + + else: + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + with tm.ensure_clean() as path: + to_feather(df, path) + with pd.option_context("mode.string_storage", string_storage): + result = read_feather(path, dtype_backend=dtype_backend) + + expected = pd.DataFrame( + { + "a": pd.Series([1, np.nan, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="Int64"), + "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": pd.Series([True, False, pd.NA], dtype="boolean"), + "f": pd.Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = pd.DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(result, expected) + + def test_int_columns_and_index(self): + df = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index([3, 4, 5], name="test")) + self.check_round_trip(df) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.feather") as path: + df.to_feather(path) + with pytest.raises(ValueError, match=msg): + read_feather(path, dtype_backend="numpy") + + def test_string_inference(self, tmp_path): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}) + df.to_feather(path) + with pd.option_context("future.infer_string", True): + result = read_feather(path) + expected = pd.DataFrame(data={"a": ["x", "y"]}, dtype="string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py new file mode 100644 index 0000000000000000000000000000000000000000..a1dec8a2d05b4fc4c39cbc910544532bf4eb0cca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py @@ -0,0 +1,345 @@ +import io + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + date_range, + read_csv, + read_excel, + read_feather, + read_json, + read_parquet, + read_pickle, + read_stata, + read_table, +) +import pandas._testing as tm +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def fsspectest(): + pytest.importorskip("fsspec") + from fsspec import register_implementation + from fsspec.implementations.memory import MemoryFileSystem + from fsspec.registry import _registry as registry + + class TestMemoryFS(MemoryFileSystem): + protocol = "testmem" + test = [None] + + def __init__(self, **kwargs) -> None: + self.test[0] = kwargs.pop("test", None) + super().__init__(**kwargs) + + register_implementation("testmem", TestMemoryFS, clobber=True) + yield TestMemoryFS() + registry.pop("testmem", None) + TestMemoryFS.test[0] = None + TestMemoryFS.store.clear() + + +@pytest.fixture +def df1(): + return DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + +@pytest.fixture +def cleared_fs(): + fsspec = pytest.importorskip("fsspec") + + memfs = fsspec.filesystem("memory") + yield memfs + memfs.store.clear() + + +def test_read_csv(cleared_fs, df1): + text = str(df1.to_csv(index=False)).encode() + with cleared_fs.open("test/test.csv", "wb") as w: + w.write(text) + df2 = read_csv("memory://test/test.csv", parse_dates=["dt"]) + + tm.assert_frame_equal(df1, df2) + + +def test_reasonable_error(monkeypatch, cleared_fs): + from fsspec.registry import known_implementations + + with pytest.raises(ValueError, match="nosuchprotocol"): + read_csv("nosuchprotocol://test/test.csv") + err_msg = "test error message" + monkeypatch.setitem( + known_implementations, + "couldexist", + {"class": "unimportable.CouldExist", "err": err_msg}, + ) + with pytest.raises(ImportError, match=err_msg): + read_csv("couldexist://test/test.csv") + + +def test_to_csv(cleared_fs, df1): + df1.to_csv("memory://test/test.csv", index=True) + + df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0) + + tm.assert_frame_equal(df1, df2) + + +def test_to_excel(cleared_fs, df1): + pytest.importorskip("openpyxl") + ext = "xlsx" + path = f"memory://test/test.{ext}" + df1.to_excel(path, index=True) + + df2 = read_excel(path, parse_dates=["dt"], index_col=0) + + tm.assert_frame_equal(df1, df2) + + +@pytest.mark.parametrize("binary_mode", [False, True]) +def 
test_to_csv_fsspec_object(cleared_fs, binary_mode, df1): + fsspec = pytest.importorskip("fsspec") + + path = "memory://test/test.csv" + mode = "wb" if binary_mode else "w" + with fsspec.open(path, mode=mode).open() as fsspec_object: + df1.to_csv(fsspec_object, index=True) + assert not fsspec_object.closed + + mode = mode.replace("w", "r") + with fsspec.open(path, mode=mode) as fsspec_object: + df2 = read_csv( + fsspec_object, + parse_dates=["dt"], + index_col=0, + ) + assert not fsspec_object.closed + + tm.assert_frame_equal(df1, df2) + + +def test_csv_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_csv( + "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False + ) + assert fsspectest.test[0] == "csv_write" + read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"}) + assert fsspectest.test[0] == "csv_read" + + +def test_read_table_options(fsspectest): + # GH #39167 + df = DataFrame({"a": [0]}) + df.to_csv( + "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False + ) + assert fsspectest.test[0] == "csv_write" + read_table("testmem://test/test.csv", storage_options={"test": "csv_read"}) + assert fsspectest.test[0] == "csv_read" + + +def test_excel_options(fsspectest): + pytest.importorskip("openpyxl") + extension = "xlsx" + + df = DataFrame({"a": [0]}) + + path = f"testmem://test/test.{extension}" + + df.to_excel(path, storage_options={"test": "write"}, index=False) + assert fsspectest.test[0] == "write" + read_excel(path, storage_options={"test": "read"}) + assert fsspectest.test[0] == "read" + + +def test_to_parquet_new_file(cleared_fs, df1): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + + df1.to_parquet( + "memory://test/test.csv", index=True, engine="fastparquet", compression=None + ) + + +def test_arrowparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("pyarrow") + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="pyarrow", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="pyarrow", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet +def test_fastparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="fastparquet", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="fastparquet", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + +@pytest.mark.single_cpu +def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so): + pytest.importorskip("s3fs") + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so + ), + read_csv(tips_file), + ) + # the following are decompressed by pandas, not fsspec + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv.gz", storage_options=s3so + ), + read_csv(tips_file), + ) + tm.assert_equal( + read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv.bz2", 
storage_options=s3so + ), + read_csv(tips_file), + ) + + +@pytest.mark.single_cpu +@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) +def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so): + pytest.importorskip("s3fs") + tm.assert_equal( + read_csv( + f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv", + storage_options=s3so, + ), + read_csv(tips_file), + ) + + +@pytest.mark.single_cpu +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet +def test_s3_parquet(s3_public_bucket, s3so, df1): + pytest.importorskip("fastparquet") + pytest.importorskip("s3fs") + + fn = f"s3://{s3_public_bucket.name}/test.parquet" + df1.to_parquet( + fn, index=False, engine="fastparquet", compression=None, storage_options=s3so + ) + df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) + tm.assert_equal(df1, df2) + + +@td.skip_if_installed("fsspec") +def test_not_present_exception(): + msg = "Missing optional dependency 'fsspec'|fsspec library is required" + with pytest.raises(ImportError, match=msg): + read_csv("memory://test/test.csv") + + +def test_feather_options(fsspectest): + pytest.importorskip("pyarrow") + df = DataFrame({"a": [0]}) + df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"}) + assert fsspectest.test[0] == "feather_write" + out = read_feather("testmem://mockfile", storage_options={"test": "feather_read"}) + assert fsspectest.test[0] == "feather_read" + tm.assert_frame_equal(df, out) + + +def test_pickle_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_pickle("testmem://mockfile", storage_options={"test": "pickle_write"}) + assert fsspectest.test[0] == "pickle_write" + out = read_pickle("testmem://mockfile", storage_options={"test": "pickle_read"}) + assert fsspectest.test[0] == "pickle_read" + tm.assert_frame_equal(df, out) + + +def test_json_options(fsspectest, compression): + df = DataFrame({"a": [0]}) + df.to_json( + "testmem://mockfile", + compression=compression, + storage_options={"test": "json_write"}, + ) + assert fsspectest.test[0] == "json_write" + out = read_json( + "testmem://mockfile", + compression=compression, + storage_options={"test": "json_read"}, + ) + assert fsspectest.test[0] == "json_read" + tm.assert_frame_equal(df, out) + + +def test_stata_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_stata( + "testmem://mockfile", storage_options={"test": "stata_write"}, write_index=False + ) + assert fsspectest.test[0] == "stata_write" + out = read_stata("testmem://mockfile", storage_options={"test": "stata_read"}) + assert fsspectest.test[0] == "stata_read" + tm.assert_frame_equal(df, out.astype("int64")) + + +def test_markdown_options(fsspectest): + pytest.importorskip("tabulate") + df = DataFrame({"a": [0]}) + df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"}) + assert fsspectest.test[0] == "md_write" + assert fsspectest.cat("testmem://mockfile") + + +def test_non_fsspec_options(): + pytest.importorskip("pyarrow") + with pytest.raises(ValueError, match="storage_options"): + read_csv("localfile", storage_options={"a": True}) + with pytest.raises(ValueError, match="storage_options"): + # separate test for parquet, which has a different code path + read_parquet("localfile", storage_options={"a": True}) + by = io.BytesIO() + + with pytest.raises(ValueError, match="storage_options"): + read_csv(by, storage_options={"a": True}) + + df = DataFrame({"a": [0]}) + with pytest.raises(ValueError, match="storage_options"): + 
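+        # storage_options are only forwarded to fsspec-backed URLs; a plain
+        # local path has no remote filesystem to receive them, so this write
+        # raises ValueError as well.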
df.to_parquet("nonfsspecpath", storage_options={"a": True}) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b212ceb2c41c9a8fa0828b691b6161db02d62f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py @@ -0,0 +1,14 @@ +import pandas as pd +import pandas._testing as tm + + +def test_read_gbq_deprecated(): + with tm.assert_produces_warning(FutureWarning): + with tm.external_error_raised(Exception): + pd.read_gbq("fake") + + +def test_to_gbq_deprecated(): + with tm.assert_produces_warning(FutureWarning): + with tm.external_error_raised(Exception): + pd.DataFrame(range(1)).to_gbq("fake") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py new file mode 100644 index 0000000000000000000000000000000000000000..0ce6a8bf82cd835d64ec7bab242ec6444973b64e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py @@ -0,0 +1,219 @@ +from io import BytesIO +import os +import pathlib +import tarfile +import zipfile + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + date_range, + read_csv, + read_excel, + read_json, + read_parquet, +) +import pandas._testing as tm +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def gcs_buffer(): + """Emulate GCS using a binary buffer.""" + pytest.importorskip("gcsfs") + fsspec = pytest.importorskip("fsspec") + + gcs_buffer = BytesIO() + gcs_buffer.close = lambda: True + + class MockGCSFileSystem(fsspec.AbstractFileSystem): + @staticmethod + def open(*args, **kwargs): + gcs_buffer.seek(0) + return gcs_buffer + + def ls(self, path, **kwargs): + # needed for pyarrow + return [{"name": path, "type": "file"}] + + # Overwrites the default implementation from gcsfs to our mock class + fsspec.register_implementation("gs", MockGCSFileSystem, clobber=True) + + return gcs_buffer + + +# Patches pyarrow; other processes should not pick up change +@pytest.mark.single_cpu +@pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"]) +def test_to_read_gcs(gcs_buffer, format, monkeypatch, capsys): + """ + Test that many to/read functions support GCS. 
+ + GH 33987 + """ + + df1 = DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + path = f"gs://test/test.{format}" + + if format == "csv": + df1.to_csv(path, index=True) + df2 = read_csv(path, parse_dates=["dt"], index_col=0) + elif format == "excel": + path = "gs://test/test.xlsx" + df1.to_excel(path) + df2 = read_excel(path, parse_dates=["dt"], index_col=0) + elif format == "json": + df1.to_json(path) + df2 = read_json(path, convert_dates=["dt"]) + elif format == "parquet": + pytest.importorskip("pyarrow") + pa_fs = pytest.importorskip("pyarrow.fs") + + class MockFileSystem(pa_fs.FileSystem): + @staticmethod + def from_uri(path): + print("Using pyarrow filesystem") + to_local = pathlib.Path(path.replace("gs://", "")).absolute().as_uri() + return pa_fs.LocalFileSystem(to_local) + + with monkeypatch.context() as m: + m.setattr(pa_fs, "FileSystem", MockFileSystem) + df1.to_parquet(path) + df2 = read_parquet(path) + captured = capsys.readouterr() + assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n" + elif format == "markdown": + pytest.importorskip("tabulate") + df1.to_markdown(path) + df2 = df1 + + tm.assert_frame_equal(df1, df2) + + +def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str): + """ + For zip compression, only compare the CRC-32 checksum of the file contents + to avoid checking the time-dependent last-modified timestamp which + in some CI builds is off-by-one + + See https://en.wikipedia.org/wiki/ZIP_(file_format)#File_headers + """ + if compression == "zip": + # Only compare the CRC checksum of the file contents + with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile( + BytesIO(expected) + ) as res: + for res_info, exp_info in zip(res.infolist(), exp.infolist()): + assert res_info.CRC == exp_info.CRC + elif compression == "tar": + with tarfile.open(fileobj=BytesIO(result)) as tar_exp, tarfile.open( + fileobj=BytesIO(expected) + ) as tar_res: + for tar_res_info, tar_exp_info in zip( + tar_res.getmembers(), tar_exp.getmembers() + ): + actual_file = tar_res.extractfile(tar_res_info) + expected_file = tar_exp.extractfile(tar_exp_info) + assert (actual_file is None) == (expected_file is None) + if actual_file is not None and expected_file is not None: + assert actual_file.read() == expected_file.read() + else: + assert result == expected + + +@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"]) +def test_to_csv_compression_encoding_gcs( + gcs_buffer, compression_only, encoding, compression_to_extension +): + """ + Compression and encoding should with GCS. 
+ + GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and + GH 32392 (read_csv, encoding) + """ + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # reference of compressed and encoded file + compression = {"method": compression_only} + if compression_only == "gzip": + compression["mtime"] = 1 # be reproducible + buffer = BytesIO() + df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb") + + # write compressed file with explicit compression + path_gcs = "gs://test/test.csv" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + res = gcs_buffer.getvalue() + expected = buffer.getvalue() + assert_equal_zip_safe(res, expected, compression_only) + + read_df = read_csv( + path_gcs, index_col=0, compression=compression_only, encoding=encoding + ) + tm.assert_frame_equal(df, read_df) + + # write compressed file with implicit compression + file_ext = compression_to_extension[compression_only] + compression["method"] = "infer" + path_gcs += f".{file_ext}" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + + res = gcs_buffer.getvalue() + expected = buffer.getvalue() + assert_equal_zip_safe(res, expected, compression_only) + + read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding) + tm.assert_frame_equal(df, read_df) + + +def test_to_parquet_gcs_new_file(monkeypatch, tmpdir): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + pytest.importorskip("fastparquet") + pytest.importorskip("gcsfs") + + from fsspec import AbstractFileSystem + + df1 = DataFrame( + { + "int": [1, 3], + "float": [2.0, np.nan], + "str": ["t", "s"], + "dt": date_range("2018-06-18", periods=2), + } + ) + + class MockGCSFileSystem(AbstractFileSystem): + def open(self, path, mode="r", *args): + if "w" not in mode: + raise FileNotFoundError + return open(os.path.join(tmpdir, "test.parquet"), mode, encoding="utf-8") + + monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) + df1.to_parquet( + "gs://test/test.csv", index=True, engine="fastparquet", compression=None + ) + + +@td.skip_if_installed("gcsfs") +def test_gcs_not_present_exception(): + with tm.external_error_raised(ImportError): + read_csv("gs://test/test.csv") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_html.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_html.py new file mode 100644 index 0000000000000000000000000000000000000000..607357e709b6ec94225c8ff266219abdb763e085 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_html.py @@ -0,0 +1,1657 @@ +from collections.abc import Iterator +from functools import partial +from io import ( + BytesIO, + StringIO, +) +import os +from pathlib import Path +import re +import threading +from urllib.error import URLError + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, + read_csv, + read_html, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.common import file_path_to_url + + +@pytest.fixture( + params=[ + "chinese_utf-16.html", + "chinese_utf-32.html", + "chinese_utf-8.html", + "letz_latin1.html", + ] +) +def html_encoding_file(request, 
datapath): + """Parametrized fixture for HTML encoding test filenames.""" + return datapath("io", "data", "html_encoding", request.param) + + +def assert_framelist_equal(list1, list2, *args, **kwargs): + assert len(list1) == len(list2), ( + "lists are not of equal size " + f"len(list1) == {len(list1)}, " + f"len(list2) == {len(list2)}" + ) + msg = "not all list elements are DataFrames" + both_frames = all( + map( + lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame), + list1, + list2, + ) + ) + assert both_frames, msg + for frame_i, frame_j in zip(list1, list2): + tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs) + assert not frame_i.empty, "frames are both empty" + + +def test_bs4_version_fails(monkeypatch, datapath): + bs4 = pytest.importorskip("bs4") + pytest.importorskip("html5lib") + + monkeypatch.setattr(bs4, "__version__", "4.2") + with pytest.raises(ImportError, match="Pandas requires version"): + read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4") + + +def test_invalid_flavor(): + url = "google.com" + flavor = "invalid flavor" + msg = r"\{" + flavor + r"\} is not a valid set of flavors" + + with pytest.raises(ValueError, match=msg): + read_html(StringIO(url), match="google", flavor=flavor) + + +def test_same_ordering(datapath): + pytest.importorskip("bs4") + pytest.importorskip("lxml") + pytest.importorskip("html5lib") + + filename = datapath("io", "data", "html", "valid_markup.html") + dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"]) + dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"]) + assert_framelist_equal(dfs_lxml, dfs_bs4) + + +@pytest.fixture( + params=[ + pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]), + pytest.param("lxml", marks=td.skip_if_no("lxml")), + ], +) +def flavor_read_html(request): + return partial(read_html, flavor=request.param) + + +class TestReadHtml: + def test_literal_html_deprecation(self, flavor_read_html): + # GH 53785 + msg = ( + "Passing literal html to 'read_html' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + flavor_read_html( + """ + + + + + + + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>A</th>
+                            <th>B</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td>1</td>
+                            <td>2</td>
+                        </tr>
+                    </tbody>
+                    <tbody>
+                        <tr>
+                            <td>3</td>
+                            <td>4</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ) + + @pytest.fixture + def spam_data(self, datapath): + return datapath("io", "data", "html", "spam.html") + + @pytest.fixture + def banklist_data(self, datapath): + return datapath("io", "data", "html", "banklist.html") + + def test_to_html_compat(self, flavor_read_html): + df = ( + DataFrame( + np.random.default_rng(2).random((4, 3)), + columns=pd.Index(list("abc"), dtype=object), + ) + # pylint: disable-next=consider-using-f-string + .map("{:.3f}".format).astype(float) + ) + out = df.to_html() + res = flavor_read_html( + StringIO(out), attrs={"class": "dataframe"}, index_col=0 + )[0] + tm.assert_frame_equal(res, df) + + def test_dtype_backend(self, string_storage, dtype_backend, flavor_read_html): + # GH#50286 + df = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_)) + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + out = df.to_html(index=False) + with pd.option_context("mode.string_storage", string_storage): + result = flavor_read_html(StringIO(out), dtype_backend=dtype_backend)[0] + + expected = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + import pyarrow as pa + + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_banklist_url(self, httpserver, banklist_data, flavor_read_html): + with open(banklist_data, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df1 = flavor_read_html( + # lxml cannot find attrs leave out for now + httpserver.url, + match="First Federal Bank of Florida", # attrs={"class": "dataTable"} + ) + # lxml cannot find attrs leave out for now + df2 = flavor_read_html( + httpserver.url, + match="Metcalf Bank", + ) # attrs={"class": "dataTable"}) + + assert_framelist_equal(df1, df2) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_spam_url(self, httpserver, spam_data, flavor_read_html): + with open(spam_data, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df1 = flavor_read_html(httpserver.url, match=".*Water.*") + df2 = flavor_read_html(httpserver.url, match="Unit") + + assert_framelist_equal(df1, df2) + + @pytest.mark.slow + def test_banklist(self, banklist_data, flavor_read_html): + df1 = flavor_read_html( + 
banklist_data, match=".*Florida.*", attrs={"id": "table"} + ) + df2 = flavor_read_html( + banklist_data, match="Metcalf Bank", attrs={"id": "table"} + ) + + assert_framelist_equal(df1, df2) + + def test_spam(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*") + df2 = flavor_read_html(spam_data, match="Unit") + assert_framelist_equal(df1, df2) + + assert df1[0].iloc[0, 0] == "Proximates" + assert df1[0].columns[0] == "Nutrient" + + def test_spam_no_match(self, spam_data, flavor_read_html): + dfs = flavor_read_html(spam_data) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_banklist_no_match(self, banklist_data, flavor_read_html): + dfs = flavor_read_html(banklist_data, attrs={"id": "table"}) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_spam_header(self, spam_data, flavor_read_html): + df = flavor_read_html(spam_data, match=".*Water.*", header=2)[0] + assert df.columns[0] == "Proximates" + assert not df.empty + + def test_skiprows_int(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_range(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=range(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=range(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_list(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=[1, 2]) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=[2, 1]) + + assert_framelist_equal(df1, df2) + + def test_skiprows_set(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows={1, 2}) + df2 = flavor_read_html(spam_data, match="Unit", skiprows={2, 1}) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_short(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_slice_long(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_ndarray(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=np.arange(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=np.arange(2)) + + assert_framelist_equal(df1, df2) + + def test_skiprows_invalid(self, spam_data, flavor_read_html): + with pytest.raises(TypeError, match=("is not a valid type for skipping rows")): + flavor_read_html(spam_data, match=".*Water.*", skiprows="asdf") + + def test_index(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) + assert_framelist_equal(df1, df2) + + def test_header_and_index_no_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = 
flavor_read_html(spam_data, match="Unit", header=1, index_col=0) + assert_framelist_equal(df1, df2) + + def test_header_and_index_with_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0) + assert_framelist_equal(df1, df2) + + def test_infer_types(self, spam_data, flavor_read_html): + # 10892 infer_types removed + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) + assert_framelist_equal(df1, df2) + + def test_string_io(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + data1 = StringIO(f.read()) + + with open(spam_data, encoding="UTF-8") as f: + data2 = StringIO(f.read()) + + df1 = flavor_read_html(data1, match=".*Water.*") + df2 = flavor_read_html(data2, match="Unit") + assert_framelist_equal(df1, df2) + + def test_string(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + data = f.read() + + df1 = flavor_read_html(StringIO(data), match=".*Water.*") + df2 = flavor_read_html(StringIO(data), match="Unit") + + assert_framelist_equal(df1, df2) + + def test_file_like(self, spam_data, flavor_read_html): + with open(spam_data, encoding="UTF-8") as f: + df1 = flavor_read_html(f, match=".*Water.*") + + with open(spam_data, encoding="UTF-8") as f: + df2 = flavor_read_html(f, match="Unit") + + assert_framelist_equal(df1, df2) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_bad_url_protocol(self, httpserver, flavor_read_html): + httpserver.serve_content("urlopen error unknown url type: git", code=404) + with pytest.raises(URLError, match="urlopen error unknown url type: git"): + flavor_read_html("git://github.com", match=".*Water.*") + + @pytest.mark.slow + @pytest.mark.network + @pytest.mark.single_cpu + def test_invalid_url(self, httpserver, flavor_read_html): + httpserver.serve_content("Name or service not known", code=404) + with pytest.raises((URLError, ValueError), match="HTTP Error 404: NOT FOUND"): + flavor_read_html(httpserver.url, match=".*Water.*") + + @pytest.mark.slow + def test_file_url(self, banklist_data, flavor_read_html): + url = banklist_data + dfs = flavor_read_html( + file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"} + ) + assert isinstance(dfs, list) + for df in dfs: + assert isinstance(df, DataFrame) + + @pytest.mark.slow + def test_invalid_table_attrs(self, banklist_data, flavor_read_html): + url = banklist_data + with pytest.raises(ValueError, match="No tables found"): + flavor_read_html( + url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"} + ) + + @pytest.mark.slow + def test_multiindex_header(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, header=[0, 1] + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, index_col=[0, 1] + )[0] + assert isinstance(df.index, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + )[0] + assert isinstance(df.columns, MultiIndex) + assert isinstance(df.index, MultiIndex) + + 
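+    # Illustrative sketch, not part of the original suite: a minimal,
+    # self-contained example of how list-valued ``header`` yields MultiIndex
+    # columns (the tiny inline table here is hypothetical).
+    def _sketch_multiindex_header(self):
+        html = (
+            "<table>"
+            "<tr><th>A</th><th>B</th></tr>"
+            "<tr><th>a</th><th>b</th></tr>"
+            "<tr><td>1</td><td>2</td></tr>"
+            "</table>"
+        )
+        # Rows 0 and 1 become the two levels of the column MultiIndex.
+        df = read_html(StringIO(html), header=[0, 1])[0]
+        assert isinstance(df.columns, MultiIndex)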
@pytest.mark.slow + def test_multiindex_header_skiprows_tuples(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_multiindex_header_index_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + skiprows=1, + )[0] + assert isinstance(df.index, MultiIndex) + assert isinstance(df.columns, MultiIndex) + + @pytest.mark.slow + def test_regex_idempotency(self, banklist_data, flavor_read_html): + url = banklist_data + dfs = flavor_read_html( + file_path_to_url(os.path.abspath(url)), + match=re.compile(re.compile("Florida")), + attrs={"id": "table"}, + ) + assert isinstance(dfs, list) + for df in dfs: + assert isinstance(df, DataFrame) + + def test_negative_skiprows(self, spam_data, flavor_read_html): + msg = r"\(you passed a negative value\)" + with pytest.raises(ValueError, match=msg): + flavor_read_html(spam_data, match="Water", skiprows=-1) + + @pytest.fixture + def python_docs(self): + return """ + + +
+<table class="contentstable" align="center"><tr>
+    <td width="50%">
+        <p class="biglink"><a class="biglink" href="whatsnew/2.7.html">What's new in Python 2.7?</a><br/>
+            <span class="linkdescr">or <a href="whatsnew/index.html">all "What's new" documents</a> since 2.0</span></p>
+        <p class="biglink"><a class="biglink" href="tutorial/index.html">Tutorial</a><br/>
+            <span class="linkdescr">start here</span></p>
+    </td>
+</tr></table>
+
+<p><strong>Indices and tables:</strong></p>
+
+<table class="contentstable" align="center"><tr>
+    <td width="50%">
+        <p class="biglink"><a class="biglink" href="py-modindex.html">Python Global Module Index</a><br/>
+            <span class="linkdescr">quick access to all modules</span></p>
+        <p class="biglink"><a class="biglink" href="genindex.html">General Index</a><br/>
+            <span class="linkdescr">all functions, classes, terms</span></p>
+    </td>
+</tr></table>
+ """ # noqa: E501 + + @pytest.mark.network + @pytest.mark.single_cpu + def test_multiple_matches(self, python_docs, httpserver, flavor_read_html): + httpserver.serve_content(content=python_docs) + dfs = flavor_read_html(httpserver.url, match="Python") + assert len(dfs) > 1 + + @pytest.mark.network + @pytest.mark.single_cpu + def test_python_docs_table(self, python_docs, httpserver, flavor_read_html): + httpserver.serve_content(content=python_docs) + dfs = flavor_read_html(httpserver.url, match="Python") + zz = [df.iloc[0, 0][0:4] for df in dfs] + assert sorted(zz) == ["Pyth", "What"] + + def test_empty_tables(self, flavor_read_html): + """ + Make sure that read_html ignores empty tables. + """ + html = """ + + + + + + + + + + + + + +
+            <table>
+                <thead>
+                    <tr>
+                        <th>A</th>
+                        <th>B</th>
+                    </tr>
+                </thead>
+                <tbody>
+                    <tr>
+                        <td>1</td>
+                        <td>2</td>
+                    </tr>
+                </tbody>
+            </table>
+            <table>
+                <tbody>
+                </tbody>
+            </table>
+ """ + result = flavor_read_html(StringIO(html)) + assert len(result) == 1 + + def test_multiple_tbody(self, flavor_read_html): + # GH-20690 + # Read all tbody tags within a single table. + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>A</th>
+                            <th>B</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td>1</td>
+                            <td>2</td>
+                        </tr>
+                    </tbody>
+                    <tbody>
+                        <tr>
+                            <td>3</td>
+                            <td>4</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ) + )[0] + + expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_header_and_one_column(self, flavor_read_html): + """ + Don't fail with bs4 when there is a header and only one column + as described in issue #9178 + """ + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>Header</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td>first</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ) + )[0] + + expected = DataFrame(data={"Header": "first"}, index=[0]) + + tm.assert_frame_equal(result, expected) + + def test_thead_without_tr(self, flavor_read_html): + """ + Ensure parser adds within on malformed HTML. + """ + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <th>Country</th>
+                        <th>Municipality</th>
+                        <th>Year</th>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td>Ukraine</td>
+                            <td>Odessa</td>
+                            <td>1944</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ) + )[0] + + expected = DataFrame( + data=[["Ukraine", "Odessa", 1944]], + columns=["Country", "Municipality", "Year"], + ) + + tm.assert_frame_equal(result, expected) + + def test_tfoot_read(self, flavor_read_html): + """ + Make sure that read_html reads tfoot, containing td or th. + Ignores empty tfoot + """ + data_template = """ + + + + + + + + + + + + + + {footer} + +
+        <table>
+            <thead>
+                <tr>
+                    <th>A</th>
+                    <th>B</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                    <td>bodyA</td>
+                    <td>bodyB</td>
+                </tr>
+            </tbody>
+            <tfoot>
+                {footer}
+            </tfoot>
+        </table>
""" + + expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"]) + + expected2 = DataFrame( + data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"] + ) + + data1 = data_template.format(footer="") + data2 = data_template.format(footer="footAfootB") + + result1 = flavor_read_html(StringIO(data1))[0] + result2 = flavor_read_html(StringIO(data2))[0] + + tm.assert_frame_equal(result1, expected1) + tm.assert_frame_equal(result2, expected2) + + def test_parse_header_of_non_string_column(self, flavor_read_html): + # GH5048: if header is specified explicitly, an int column should be + # parsed as int while its header is parsed as str + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
+                <table>
+                    <tr>
+                        <td>S</td>
+                        <td>I</td>
+                    </tr>
+                    <tr>
+                        <td>text</td>
+                        <td>1944</td>
+                    </tr>
+                </table>
+ """ + ), + header=0, + )[0] + + expected = DataFrame([["text", 1944]], columns=("S", "I")) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.slow + def test_banklist_header(self, banklist_data, datapath, flavor_read_html): + from pandas.io.html import _remove_whitespace + + def try_remove_ws(x): + try: + return _remove_whitespace(x) + except AttributeError: + return x + + df = flavor_read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0] + ground_truth = read_csv( + datapath("io", "data", "csv", "banklist.csv"), + converters={"Updated Date": Timestamp, "Closing Date": Timestamp}, + ) + assert df.shape == ground_truth.shape + old = [ + "First Vietnamese American Bank In Vietnamese", + "Westernbank Puerto Rico En Espanol", + "R-G Premier Bank of Puerto Rico En Espanol", + "Eurobank En Espanol", + "Sanderson State Bank En Espanol", + "Washington Mutual Bank (Including its subsidiary Washington " + "Mutual Bank FSB)", + "Silver State Bank En Espanol", + "AmTrade International Bank En Espanol", + "Hamilton Bank, NA En Espanol", + "The Citizens Savings Bank Pioneer Community Bank, Inc.", + ] + new = [ + "First Vietnamese American Bank", + "Westernbank Puerto Rico", + "R-G Premier Bank of Puerto Rico", + "Eurobank", + "Sanderson State Bank", + "Washington Mutual Bank", + "Silver State Bank", + "AmTrade International Bank", + "Hamilton Bank, NA", + "The Citizens Savings Bank", + ] + dfnew = df.map(try_remove_ws).replace(old, new) + gtnew = ground_truth.map(try_remove_ws) + converted = dfnew + date_cols = ["Closing Date", "Updated Date"] + converted[date_cols] = converted[date_cols].apply(to_datetime) + tm.assert_frame_equal(converted, gtnew) + + @pytest.mark.slow + def test_gold_canyon(self, banklist_data, flavor_read_html): + gc = "Gold Canyon" + with open(banklist_data, encoding="utf-8") as f: + raw_text = f.read() + + assert gc in raw_text + df = flavor_read_html( + banklist_data, match="Gold Canyon", attrs={"id": "table"} + )[0] + assert gc in df.to_string() + + def test_different_number_of_cols(self, flavor_read_html): + expected = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <th></th>
+                        <th>C_l0_g0</th>
+                        <th>C_l0_g1</th>
+                        <th>C_l0_g2</th>
+                        <th>C_l0_g3</th>
+                        <th>C_l0_g4</th>
+                    </tr>
+                    <tr>
+                        <th>R_l0_g0</th>
+                        <td> 0.763</td>
+                        <td> 0.233</td>
+                        <td> nan</td>
+                        <td> nan</td>
+                        <td> nan</td>
+                    </tr>
+                    <tr>
+                        <th>R_l0_g1</th>
+                        <td> 0.244</td>
+                        <td> 0.285</td>
+                        <td> 0.392</td>
+                        <td> 0.137</td>
+                        <td> 0.222</td>
+                    </tr>
+                </table>
""" + ), + index_col=0, + )[0] + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + + + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <th></th>
+                        <th>C_l0_g0</th>
+                        <th>C_l0_g1</th>
+                        <th>C_l0_g2</th>
+                        <th>C_l0_g3</th>
+                        <th>C_l0_g4</th>
+                    </tr>
+                    <tr>
+                        <th>R_l0_g0</th>
+                        <td> 0.763</td>
+                        <td> 0.233</td>
+                    </tr>
+                    <tr>
+                        <th>R_l0_g1</th>
+                        <td> 0.244</td>
+                        <td> 0.285</td>
+                        <td> 0.392</td>
+                        <td> 0.137</td>
+                        <td> 0.222</td>
+                    </tr>
+                </table>
""" + ), + index_col=0, + )[0] + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_1(self, flavor_read_html): + # GH17054 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <th>A</th>
+                        <th colspan="1">B</th>
+                        <th rowspan="1">C</th>
+                    </tr>
+                    <tr>
+                        <td>a</td>
+                        <td>b</td>
+                        <td>c</td>
+                    </tr>
+                </table>
+ """ + ) + )[0] + + expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"]) + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_copy_values(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # X x Y Z W + # A B b z C + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <td colspan="2">X</td>
+                        <td>Y</td>
+                        <td rowspan="2">Z</td>
+                        <td>W</td>
+                    </tr>
+                    <tr>
+                        <td>A</td>
+                        <td colspan="2">B</td>
+                        <td>C</td>
+                    </tr>
+                </table>
+ """ + ), + header=0, + )[0] + + expected = DataFrame( + data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"] + ) + + tm.assert_frame_equal(result, expected) + + def test_colspan_rowspan_both_not_1(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # A B b b C + # a b b b D + + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
+                <table>
+                    <tr>
+                        <td rowspan="2">A</td>
+                        <td rowspan="2" colspan="3">B</td>
+                        <td>C</td>
+                    </tr>
+                    <tr>
+                        <td>D</td>
+                    </tr>
+                </table>
+ """ + ), + header=0, + )[0] + + expected = DataFrame( + data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"] + ) + + tm.assert_frame_equal(result, expected) + + def test_rowspan_at_end_of_row(self, flavor_read_html): + # GH17054 + + # In ASCII, with lowercase letters being copies: + # + # A B + # C b + + result = flavor_read_html( + StringIO( + """ + + + + + + + + +
+                <table>
+                    <tr>
+                        <td>A</td>
+                        <td rowspan="2">B</td>
+                    </tr>
+                    <tr>
+                        <td>C</td>
+                    </tr>
+                </table>
+ """ + ), + header=0, + )[0] + + expected = DataFrame(data=[["C", "B"]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_rowspan_only_rows(self, flavor_read_html): + # GH17054 + + result = flavor_read_html( + StringIO( + """ + + + + + +
+                <table>
+                    <tr>
+                        <td rowspan="2">A</td>
+                        <td rowspan="2">B</td>
+                    </tr>
+                </table>
+ """ + ), + header=0, + )[0] + + expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_header_inferred_from_rows_with_only_th(self, flavor_read_html): + # GH17054 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <th>A</th>
+                        <th>B</th>
+                    </tr>
+                    <tr>
+                        <th>a</th>
+                        <th>b</th>
+                    </tr>
+                    <tr>
+                        <td>1</td>
+                        <td>2</td>
+                    </tr>
+                </table>
+ """ + ) + )[0] + + columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]]) + expected = DataFrame(data=[[1, 2]], columns=columns) + + tm.assert_frame_equal(result, expected) + + def test_parse_dates_list(self, flavor_read_html): + df = DataFrame({"date": date_range("1/1/2001", periods=10)}) + expected = df.to_html() + res = flavor_read_html(StringIO(expected), parse_dates=[1], index_col=0) + tm.assert_frame_equal(df, res[0]) + res = flavor_read_html(StringIO(expected), parse_dates=["date"], index_col=0) + tm.assert_frame_equal(df, res[0]) + + def test_parse_dates_combine(self, flavor_read_html): + raw_dates = Series(date_range("1/1/2001", periods=10)) + df = DataFrame( + { + "date": raw_dates.map(lambda x: str(x.date())), + "time": raw_dates.map(lambda x: str(x.time())), + } + ) + res = flavor_read_html( + StringIO(df.to_html()), parse_dates={"datetime": [1, 2]}, index_col=1 + ) + newdf = DataFrame({"datetime": raw_dates}) + tm.assert_frame_equal(newdf, res[0]) + + def test_wikipedia_states_table(self, datapath, flavor_read_html): + data = datapath("io", "data", "html", "wikipedia_states.html") + assert os.path.isfile(data), f"{repr(data)} is not a file" + assert os.path.getsize(data), f"{repr(data)} is an empty file" + result = flavor_read_html(data, match="Arizona", header=1)[0] + assert result.shape == (60, 12) + assert "Unnamed" in result.columns[-1] + assert result["sq mi"].dtype == np.dtype("float64") + assert np.allclose(result.loc[0, "sq mi"], 665384.04) + + def test_wikipedia_states_multiindex(self, datapath, flavor_read_html): + data = datapath("io", "data", "html", "wikipedia_states.html") + result = flavor_read_html(data, match="Arizona", index_col=0)[0] + assert result.shape == (60, 11) + assert "Unnamed" in result.columns[-1][1] + assert result.columns.nlevels == 2 + assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04) + + def test_parser_error_on_empty_header_row(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr><th></th><th></th></tr>
+                        <tr><th>A</th><th>B</th></tr>
+                    </thead>
+                    <tbody>
+                        <tr><td>a</td><td>b</td></tr>
+                    </tbody>
+                </table>
+ """ + ), + header=[0, 1], + ) + expected = DataFrame( + [["a", "b"]], + columns=MultiIndex.from_tuples( + [("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")] + ), + ) + tm.assert_frame_equal(result[0], expected) + + def test_decimal_rows(self, flavor_read_html): + # GH 12907 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>Header</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td>1100#101</td>
+                        </tr>
+                    </tbody>
+                </table>
+ + """ + ), + decimal="#", + )[0] + + expected = DataFrame(data={"Header": 1100.101}, index=[0]) + + assert result["Header"].dtype == np.dtype("float64") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("arg", [True, False]) + def test_bool_header_arg(self, spam_data, arg, flavor_read_html): + # GH 6114 + msg = re.escape( + "Passing a bool to header is invalid. Use header=None for no header or " + "header=int or list-like of ints to specify the row(s) making up the " + "column names" + ) + with pytest.raises(TypeError, match=msg): + flavor_read_html(spam_data, header=arg) + + def test_converters(self, flavor_read_html): + # GH 13461 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>a</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td> 0.763</td>
+                        </tr>
+                        <tr>
+                            <td> 0.244</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ), + converters={"a": str}, + )[0] + + expected = DataFrame({"a": ["0.763", "0.244"]}) + + tm.assert_frame_equal(result, expected) + + def test_na_values(self, flavor_read_html): + # GH 13461 + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr>
+                            <th>a</th>
+                        </tr>
+                    </thead>
+                    <tbody>
+                        <tr>
+                            <td> 0.763</td>
+                        </tr>
+                        <tr>
+                            <td> 0.244</td>
+                        </tr>
+                    </tbody>
+                </table>
""" + ), + na_values=[0.244], + )[0] + + expected = DataFrame({"a": [0.763, np.nan]}) + + tm.assert_frame_equal(result, expected) + + def test_keep_default_na(self, flavor_read_html): + html_data = """ + + + + + + + + + + + + + +
+            <table>
+                <thead>
+                    <tr>
+                        <th>a</th>
+                    </tr>
+                </thead>
+                <tbody>
+                    <tr>
+                        <td>N/A</td>
+                    </tr>
+                    <tr>
+                        <td>NA</td>
+                    </tr>
+                </tbody>
+            </table>
""" + + expected_df = DataFrame({"a": ["N/A", "NA"]}) + html_df = flavor_read_html(StringIO(html_data), keep_default_na=False)[0] + tm.assert_frame_equal(expected_df, html_df) + + expected_df = DataFrame({"a": [np.nan, np.nan]}) + html_df = flavor_read_html(StringIO(html_data), keep_default_na=True)[0] + tm.assert_frame_equal(expected_df, html_df) + + def test_preserve_empty_rows(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + + + + + + +
+                <table>
+                    <tr>
+                        <th>A</th>
+                        <th>B</th>
+                    </tr>
+                    <tr>
+                        <td>a</td>
+                        <td>b</td>
+                    </tr>
+                    <tr>
+                        <td></td>
+                        <td></td>
+                    </tr>
+                </table>
+ """ + ) + )[0] + + expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"]) + + tm.assert_frame_equal(result, expected) + + def test_ignore_empty_rows_when_inferring_header(self, flavor_read_html): + result = flavor_read_html( + StringIO( + """ + + + + + + + + + +
+                <table>
+                    <thead>
+                        <tr><th></th><th></th></tr>
+                        <tr><th>A</th><th>B</th></tr>
+                        <tr><th>a</th><th>b</th></tr>
+                    </thead>
+                    <tbody>
+                        <tr><td>1</td><td>2</td></tr>
+                    </tbody>
+                </table>
+ """ + ) + )[0] + + columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]]) + expected = DataFrame(data=[[1, 2]], columns=columns) + + tm.assert_frame_equal(result, expected) + + def test_multiple_header_rows(self, flavor_read_html): + # Issue #13434 + expected_df = DataFrame( + data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")] + ) + expected_df.columns = [ + ["Unnamed: 0_level_0", "Age", "Party"], + ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"], + ] + html = expected_df.to_html(index=False) + html_df = flavor_read_html(StringIO(html))[0] + tm.assert_frame_equal(expected_df, html_df) + + def test_works_on_valid_markup(self, datapath, flavor_read_html): + filename = datapath("io", "data", "html", "valid_markup.html") + dfs = flavor_read_html(filename, index_col=0) + assert isinstance(dfs, list) + assert isinstance(dfs[0], DataFrame) + + @pytest.mark.slow + def test_fallback_success(self, datapath, flavor_read_html): + banklist_data = datapath("io", "data", "html", "banklist.html") + + flavor_read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"]) + + def test_to_html_timestamp(self): + rng = date_range("2000-01-01", periods=10) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=rng) + + result = df.to_html() + assert "2000-01-01" in result + + def test_to_html_borderless(self): + df = DataFrame([{"A": 1, "B": 2}]) + out_border_default = df.to_html() + out_border_true = df.to_html(border=True) + out_border_explicit_default = df.to_html(border=1) + out_border_nondefault = df.to_html(border=2) + out_border_zero = df.to_html(border=0) + + out_border_false = df.to_html(border=False) + + assert ' border="1"' in out_border_default + assert out_border_true == out_border_default + assert out_border_default == out_border_explicit_default + assert out_border_default != out_border_nondefault + assert ' border="2"' in out_border_nondefault + assert ' border="0"' not in out_border_zero + assert " border" not in out_border_false + assert out_border_zero == out_border_false + + @pytest.mark.parametrize( + "displayed_only,exp0,exp1", + [ + (True, DataFrame(["foo"]), None), + (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])), + ], + ) + def test_displayed_only(self, displayed_only, exp0, exp1, flavor_read_html): + # GH 20027 + data = """ + + + + + +
+        <html>
+          <body>
+            <table>
+              <tr>
+                <td>
+                  foo
+                  <span style="display:none;text-align:center">bar</span>
+                  <span style="display:none">baz</span>
+                  <span style="display: none">qux</span>
+                </td>
+              </tr>
+            </table>
+            <table style="display: none">
+              <tr>
+                <td>foo</td>
+              </tr>
+            </table>
+          </body>
+        </html>
+ + """ + + dfs = flavor_read_html(StringIO(data), displayed_only=displayed_only) + tm.assert_frame_equal(dfs[0], exp0) + + if exp1 is not None: + tm.assert_frame_equal(dfs[1], exp1) + else: + assert len(dfs) == 1 # Should not parse hidden table + + @pytest.mark.parametrize("displayed_only", [True, False]) + def test_displayed_only_with_many_elements(self, displayed_only, flavor_read_html): + html_table = """ + + + + + + + + + + + + + +
+        <table>
+            <tr>
+                <th>A</th>
+                <th>B</th>
+            </tr>
+            <tr>
+                <td>1</td>
+                <td>2</td>
+            </tr>
+            <tr>
+                <td><span style="display:none"></span>4</td>
+                <td>5</td>
+            </tr>
+        </table>
+ """ + result = flavor_read_html(StringIO(html_table), displayed_only=displayed_only)[ + 0 + ] + expected = DataFrame({"A": [1, 4], "B": [2, 5]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:You provided Unicode markup but also provided a value for " + "from_encoding.*:UserWarning" + ) + def test_encode(self, html_encoding_file, flavor_read_html): + base_path = os.path.basename(html_encoding_file) + root = os.path.splitext(base_path)[0] + _, encoding = root.split("_") + + try: + with open(html_encoding_file, "rb") as fobj: + from_string = flavor_read_html( + fobj.read(), encoding=encoding, index_col=0 + ).pop() + + with open(html_encoding_file, "rb") as fobj: + from_file_like = flavor_read_html( + BytesIO(fobj.read()), encoding=encoding, index_col=0 + ).pop() + + from_filename = flavor_read_html( + html_encoding_file, encoding=encoding, index_col=0 + ).pop() + tm.assert_frame_equal(from_string, from_file_like) + tm.assert_frame_equal(from_string, from_filename) + except Exception: + # seems utf-16/32 fail on windows + if is_platform_windows(): + if "16" in encoding or "32" in encoding: + pytest.skip() + raise + + def test_parse_failure_unseekable(self, flavor_read_html): + # Issue #17975 + + if flavor_read_html.keywords.get("flavor") == "lxml": + pytest.skip("Not applicable for lxml") + + class UnseekableStringIO(StringIO): + def seekable(self): + return False + + bad = UnseekableStringIO( + """ +
+            <table><tr><td>spam<foobr />eggs</td></tr></table>
""" + ) + + assert flavor_read_html(bad) + + with pytest.raises(ValueError, match="passed a non-rewindable file object"): + flavor_read_html(bad) + + def test_parse_failure_rewinds(self, flavor_read_html): + # Issue #17975 + + class MockFile: + def __init__(self, data) -> None: + self.data = data + self.at_end = False + + def read(self, size=None): + data = "" if self.at_end else self.data + self.at_end = True + return data + + def seek(self, offset): + self.at_end = False + + def seekable(self): + return True + + # GH 49036 pylint checks for presence of __next__ for iterators + def __next__(self): + ... + + def __iter__(self) -> Iterator: + # `is_file_like` depends on the presence of + # the __iter__ attribute. + return self + + good = MockFile("
<table><tr><td>spam<br />eggs</td></tr></table>
") + bad = MockFile("
<table><tr><td>spam<foobr />eggs</td></tr></table>
") + + assert flavor_read_html(good) + assert flavor_read_html(bad) + + @pytest.mark.slow + @pytest.mark.single_cpu + def test_importcheck_thread_safety(self, datapath, flavor_read_html): + # see gh-16928 + + class ErrorThread(threading.Thread): + def run(self): + try: + super().run() + except Exception as err: + self.err = err + else: + self.err = None + + filename = datapath("io", "data", "html", "valid_markup.html") + helper_thread1 = ErrorThread(target=flavor_read_html, args=(filename,)) + helper_thread2 = ErrorThread(target=flavor_read_html, args=(filename,)) + + helper_thread1.start() + helper_thread2.start() + + while helper_thread1.is_alive() or helper_thread2.is_alive(): + pass + assert None is helper_thread1.err is helper_thread2.err + + def test_parse_path_object(self, datapath, flavor_read_html): + # GH 37705 + file_path_string = datapath("io", "data", "html", "spam.html") + file_path = Path(file_path_string) + df1 = flavor_read_html(file_path_string)[0] + df2 = flavor_read_html(file_path)[0] + tm.assert_frame_equal(df1, df2) + + def test_parse_br_as_space(self, flavor_read_html): + # GH 29528: pd.read_html() convert
<br> to space
+        result = flavor_read_html(
+            StringIO(
+                """
+                <table>
+                    <tr>
+                        <th>A</th>
+                    </tr>
+                    <tr>
+                        <td>word1<br>word2</td>
+                    </tr>
+                </table>
+ """ + ) + )[0] + + expected = DataFrame(data=[["word1 word2"]], columns=["A"]) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("arg", ["all", "body", "header", "footer"]) + def test_extract_links(self, arg, flavor_read_html): + gh_13141_data = """ + + + + + + + + + + + + + + + + + +
+          <table>
+            <tr>
+              <th>HTTP</th>
+              <th>FTP</th>
+              <th><a href="https://en.wiktionary.org/wiki/linkless">Linkless</a></th>
+            </tr>
+            <tr>
+              <td><a href="https://en.wikipedia.org/">Wikipedia</a></td>
+              <td>SURROUNDING <a href="ftp://ftp.us.debian.org/">Debian</a> TEXT</td>
+              <td>Linkless</td>
+            </tr>
+            <tfoot>
+              <tr>
+                <td><a href="https://en.wikipedia.org/wiki/Page_footer">Footer</a></td>
+                <td>Multiple <a href="1">links:</a> <a href="2">Only first captured.</a></td>
+              </tr>
+            </tfoot>
+          </table>
+ """ + + gh_13141_expected = { + "head_ignore": ["HTTP", "FTP", "Linkless"], + "head_extract": [ + ("HTTP", None), + ("FTP", None), + ("Linkless", "https://en.wiktionary.org/wiki/linkless"), + ], + "body_ignore": ["Wikipedia", "SURROUNDING Debian TEXT", "Linkless"], + "body_extract": [ + ("Wikipedia", "https://en.wikipedia.org/"), + ("SURROUNDING Debian TEXT", "ftp://ftp.us.debian.org/"), + ("Linkless", None), + ], + "footer_ignore": [ + "Footer", + "Multiple links: Only first captured.", + None, + ], + "footer_extract": [ + ("Footer", "https://en.wikipedia.org/wiki/Page_footer"), + ("Multiple links: Only first captured.", "1"), + None, + ], + } + + data_exp = gh_13141_expected["body_ignore"] + foot_exp = gh_13141_expected["footer_ignore"] + head_exp = gh_13141_expected["head_ignore"] + if arg == "all": + data_exp = gh_13141_expected["body_extract"] + foot_exp = gh_13141_expected["footer_extract"] + head_exp = gh_13141_expected["head_extract"] + elif arg == "body": + data_exp = gh_13141_expected["body_extract"] + elif arg == "footer": + foot_exp = gh_13141_expected["footer_extract"] + elif arg == "header": + head_exp = gh_13141_expected["head_extract"] + + result = flavor_read_html(StringIO(gh_13141_data), extract_links=arg)[0] + expected = DataFrame([data_exp, foot_exp], columns=head_exp) + expected = expected.fillna(np.nan) + tm.assert_frame_equal(result, expected) + + def test_extract_links_bad(self, spam_data): + msg = ( + "`extract_links` must be one of " + '{None, "header", "footer", "body", "all"}, got "incorrect"' + ) + with pytest.raises(ValueError, match=msg): + read_html(spam_data, extract_links="incorrect") + + def test_extract_links_all_no_header(self, flavor_read_html): + # GH 48316 + data = """ + + + + +
+        <table>
+          <tr>
+            <td>
+              <a href='https://google.com'>Google.com</a>
+            </td>
+          </tr>
+        </table>
+ """ + result = flavor_read_html(StringIO(data), extract_links="all")[0] + expected = DataFrame([[("Google.com", "https://google.com")]]) + tm.assert_frame_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_html("test", dtype_backend="numpy") + + def test_style_tag(self, flavor_read_html): + # GH 48316 + data = """ + + + + + + + + + + + + + +
+        <table>
+            <tr>
+                <th>
+                    <style>.style</style>
+                    A
+                </th>
+                <th>B</th>
+            </tr>
+            <tr>
+                <td>A1</td>
+                <td>B1</td>
+            </tr>
+            <tr>
+                <td>A2</td>
+                <td>B2</td>
+            </tr>
+        </table>
+ """ + result = flavor_read_html(StringIO(data))[0] + expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca11ad1f74e6381e389577e821d37d89cc689db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py @@ -0,0 +1,172 @@ +""" +Tests for the pandas custom headers in http(s) requests +""" +from functools import partial +import gzip +from io import BytesIO + +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + +pytestmark = [ + pytest.mark.single_cpu, + pytest.mark.network, + pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" + ), +] + + +def gzip_bytes(response_bytes): + with BytesIO() as bio: + with gzip.GzipFile(fileobj=bio, mode="w") as zipper: + zipper.write(response_bytes) + return bio.getvalue() + + +def csv_responder(df): + return df.to_csv(index=False).encode("utf-8") + + +def gz_csv_responder(df): + return gzip_bytes(csv_responder(df)) + + +def json_responder(df): + return df.to_json().encode("utf-8") + + +def gz_json_responder(df): + return gzip_bytes(json_responder(df)) + + +def html_responder(df): + return df.to_html(index=False).encode("utf-8") + + +def parquetpyarrow_reponder(df): + return df.to_parquet(index=False, engine="pyarrow") + + +def parquetfastparquet_responder(df): + # the fastparquet engine doesn't like to write to a buffer + # it can do it via the open_with function being set appropriately + # however it automatically calls the close method and wipes the buffer + # so just overwrite that attribute on this instance to not do that + + # protected by an importorskip in the respective test + import fsspec + + df.to_parquet( + "memory://fastparquet_user_agent.parquet", + index=False, + engine="fastparquet", + compression=None, + ) + with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f: + return f.read() + + +def pickle_respnder(df): + with BytesIO() as bio: + df.to_pickle(bio) + return bio.getvalue() + + +def stata_responder(df): + with BytesIO() as bio: + df.to_stata(bio, write_index=False) + return bio.getvalue() + + +@pytest.mark.parametrize( + "responder, read_method", + [ + (csv_responder, pd.read_csv), + (json_responder, pd.read_json), + ( + html_responder, + lambda *args, **kwargs: pd.read_html(*args, **kwargs)[0], + ), + pytest.param( + parquetpyarrow_reponder, + partial(pd.read_parquet, engine="pyarrow"), + marks=td.skip_if_no("pyarrow"), + ), + pytest.param( + parquetfastparquet_responder, + partial(pd.read_parquet, engine="fastparquet"), + # TODO(ArrayManager) fastparquet + marks=[ + td.skip_if_no("fastparquet"), + td.skip_if_no("fsspec"), + td.skip_array_manager_not_yet_implemented, + ], + ), + (pickle_respnder, pd.read_pickle), + (stata_responder, pd.read_stata), + (gz_csv_responder, pd.read_csv), + (gz_json_responder, pd.read_json), + ], +) +@pytest.mark.parametrize( + "storage_options", + [ + None, + {"User-Agent": "foo"}, + {"User-Agent": "foo", "Auth": "bar"}, + ], +) +def test_request_headers(responder, read_method, httpserver, storage_options): + expected = pd.DataFrame({"a": ["b"]}) + default_headers = ["Accept-Encoding", "Host", "Connection", "User-Agent"] + if "gz" in 
responder.__name__: + extra = {"Content-Encoding": "gzip"} + if storage_options is None: + storage_options = extra + else: + storage_options |= extra + else: + extra = None + expected_headers = set(default_headers).union( + storage_options.keys() if storage_options else [] + ) + httpserver.serve_content(content=responder(expected), headers=extra) + result = read_method(httpserver.url, storage_options=storage_options) + tm.assert_frame_equal(result, expected) + + request_headers = dict(httpserver.requests[0].headers) + for header in expected_headers: + exp = request_headers.pop(header) + if storage_options and header in storage_options: + assert exp == storage_options[header] + # No extra headers added + assert not request_headers + + +@pytest.mark.parametrize( + "engine", + [ + "pyarrow", + "fastparquet", + ], +) +def test_to_parquet_to_disk_with_storage_options(engine): + headers = { + "User-Agent": "custom", + "Auth": "other_custom", + } + + pytest.importorskip(engine) + + true_df = pd.DataFrame({"column_name": ["column_value"]}) + msg = ( + "storage_options passed with file object or non-fsspec file path|" + "storage_options passed with buffer, or non-supported URL" + ) + with pytest.raises(ValueError, match=msg): + true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_orc.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_orc.py new file mode 100644 index 0000000000000000000000000000000000000000..a4021311fc963a41633ebec2680c7f6d79525044 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_orc.py @@ -0,0 +1,436 @@ +""" test orc compat """ +import datetime +from decimal import Decimal +from io import BytesIO +import os +import pathlib + +import numpy as np +import pytest + +import pandas as pd +from pandas import read_orc +import pandas._testing as tm +from pandas.core.arrays import StringArray + +pytest.importorskip("pyarrow.orc") + +import pyarrow as pa + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def dirpath(datapath): + return datapath("io", "data", "orc") + + +@pytest.fixture( + params=[ + np.array([1, 20], dtype="uint64"), + pd.Series(["a", "b", "a"], dtype="category"), + [pd.Interval(left=0, right=2), pd.Interval(left=0, right=5)], + [pd.Period("2022-01-03", freq="D"), pd.Period("2022-01-04", freq="D")], + ] +) +def orc_writer_dtypes_not_supported(request): + # Examples of dataframes with dtypes for which conversion to ORC + # hasn't been implemented yet, that is, Category, unsigned integers, + # interval, period and sparse. 
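+    # A hedged sketch of how a companion test presumably consumes this
+    # fixture (the test name and error message below are assumptions, not
+    # verified against the original module):
+    #
+    #   def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
+    #       msg = "The dtype of one or more columns is not supported yet."
+    #       with pytest.raises(NotImplementedError, match=msg):
+    #           orc_writer_dtypes_not_supported.to_orc()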
+ return pd.DataFrame({"unimpl": request.param}) + + +def test_orc_reader_empty(dirpath): + columns = [ + "boolean1", + "byte1", + "short1", + "int1", + "long1", + "float1", + "double1", + "bytes1", + "string1", + ] + dtypes = [ + "bool", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "object", + "object", + ] + expected = pd.DataFrame(index=pd.RangeIndex(0)) + for colname, dtype in zip(columns, dtypes): + expected[colname] = pd.Series(dtype=dtype) + + inputfile = os.path.join(dirpath, "TestOrcFile.emptyFile.orc") + got = read_orc(inputfile, columns=columns) + + tm.assert_equal(expected, got) + + +def test_orc_reader_basic(dirpath): + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.test1.orc") + got = read_orc(inputfile, columns=data.keys()) + + tm.assert_equal(expected, got) + + +def test_orc_reader_decimal(dirpath): + # Only testing the first 10 rows of data + data = { + "_col0": np.array( + [ + Decimal("-1000.50000"), + Decimal("-999.60000"), + Decimal("-998.70000"), + Decimal("-997.80000"), + Decimal("-996.90000"), + Decimal("-995.10000"), + Decimal("-994.11000"), + Decimal("-993.12000"), + Decimal("-992.13000"), + Decimal("-991.14000"), + ], + dtype="object", + ) + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.decimal.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_date_low(dirpath): + data = { + "time": np.array( + [ + "1900-05-05 12:34:56.100000", + "1900-05-05 12:34:56.100100", + "1900-05-05 12:34:56.100200", + "1900-05-05 12:34:56.100300", + "1900-05-05 12:34:56.100400", + "1900-05-05 12:34:56.100500", + "1900-05-05 12:34:56.100600", + "1900-05-05 12:34:56.100700", + "1900-05-05 12:34:56.100800", + "1900-05-05 12:34:56.100900", + ], + dtype="datetime64[ns]", + ), + "date": np.array( + [ + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + datetime.date(1900, 12, 25), + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testDate1900.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_date_high(dirpath): + data = { + "time": np.array( + [ + "2038-05-05 12:34:56.100000", + "2038-05-05 12:34:56.100100", + "2038-05-05 12:34:56.100200", + "2038-05-05 12:34:56.100300", + "2038-05-05 12:34:56.100400", + "2038-05-05 12:34:56.100500", + "2038-05-05 12:34:56.100600", + "2038-05-05 12:34:56.100700", + "2038-05-05 12:34:56.100800", + "2038-05-05 12:34:56.100900", + ], + dtype="datetime64[ns]", + ), + "date": np.array( + [ + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 
12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + datetime.date(2038, 12, 25), + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testDate2038.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_reader_snappy_compressed(dirpath): + data = { + "int1": np.array( + [ + -1160101563, + 1181413113, + 2065821249, + -267157795, + 172111193, + 1752363137, + 1406072123, + 1911809390, + -1308542224, + -467100286, + ], + dtype="int32", + ), + "string1": np.array( + [ + "f50dcb8", + "382fdaaa", + "90758c6", + "9e8caf3f", + "ee97332b", + "d634da1", + "2bea4396", + "d67d89e8", + "ad71007e", + "e8c82066", + ], + dtype="object", + ), + } + expected = pd.DataFrame.from_dict(data) + + inputfile = os.path.join(dirpath, "TestOrcFile.testSnappy.orc") + got = read_orc(inputfile).iloc[:10] + + tm.assert_equal(expected, got) + + +def test_orc_roundtrip_file(dirpath): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + with tm.ensure_clean() as path: + expected.to_orc(path) + got = read_orc(path) + + tm.assert_equal(expected, got) + + +def test_orc_roundtrip_bytesio(): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + data = { + "boolean1": np.array([False, True], dtype="bool"), + "byte1": np.array([1, 100], dtype="int8"), + "short1": np.array([1024, 2048], dtype="int16"), + "int1": np.array([65536, 65536], dtype="int32"), + "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"), + "float1": np.array([1.0, 2.0], dtype="float32"), + "double1": np.array([-15.0, -5.0], dtype="float64"), + "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"), + "string1": np.array(["hi", "bye"], dtype="object"), + } + expected = pd.DataFrame.from_dict(data) + + bytes = expected.to_orc() + got = read_orc(BytesIO(bytes)) + + tm.assert_equal(expected, got) + + +def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported): + # GH44554 + # PyArrow gained ORC write support with the current argument order + pytest.importorskip("pyarrow") + + msg = "The dtype of one or more columns is not supported yet." 
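+ # pytest.raises treats ``match`` as a regex searched within the error + # message; the trailing "." matches any character, so the literal text + # above still matches.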
+ with pytest.raises(NotImplementedError, match=msg): + orc_writer_dtypes_not_supported.to_orc() + + +def test_orc_dtype_backend_pyarrow(): + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "string": list("abc"), + "string_with_nan": ["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "bytes": [b"foo", b"bar", None], + "int": list(range(1, 4)), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "bool_with_na": [True, False, None], + "datetime": pd.date_range("20130101", periods=3), + "datetime_with_nat": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + } + ) + + bytes_data = df.copy().to_orc() + result = read_orc(BytesIO(bytes_data), dtype_backend="pyarrow") + + expected = pd.DataFrame( + { + col: pd.arrays.ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + + tm.assert_frame_equal(result, expected) + + +def test_orc_dtype_backend_numpy_nullable(): + # GH#50503 + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "string": list("abc"), + "string_with_nan": ["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "int": list(range(1, 4)), + "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"), + "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "bool_with_na": [True, False, None], + } + ) + + bytes_data = df.copy().to_orc() + result = read_orc(BytesIO(bytes_data), dtype_backend="numpy_nullable") + + expected = pd.DataFrame( + { + "string": StringArray(np.array(["a", "b", "c"], dtype=np.object_)), + "string_with_nan": StringArray( + np.array(["a", pd.NA, "c"], dtype=np.object_) + ), + "string_with_none": StringArray( + np.array(["a", pd.NA, "c"], dtype=np.object_) + ), + "int": pd.Series([1, 2, 3], dtype="Int64"), + "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"), + "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"), + "float": pd.Series([4.0, 5.0, 6.0], dtype="Float64"), + "float_with_nan": pd.Series([2.0, pd.NA, 3.0], dtype="Float64"), + "bool": pd.Series([True, False, True], dtype="boolean"), + "bool_with_na": pd.Series([True, False, pd.NA], dtype="boolean"), + } + ) + + tm.assert_frame_equal(result, expected) + + +def test_orc_uri_path(): + expected = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.orc") as path: + expected.to_orc(path) + uri = pathlib.Path(path).as_uri() + result = read_orc(uri) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "index", + [ + pd.RangeIndex(start=2, stop=5, step=1), + pd.RangeIndex(start=0, stop=3, step=1, name="non-default"), + pd.Index([1, 2, 3]), + ], +) +def test_to_orc_non_default_index(index): + df = pd.DataFrame({"a": [1, 2, 3]}, index=index) + msg = ( + "orc does not support serializing a non-default index|" + "orc does not serialize index meta-data" + ) + with pytest.raises(ValueError, match=msg): + df.to_orc() + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.orc") as path: + df.to_orc(path) + with pytest.raises(ValueError, match=msg): + read_orc(path, dtype_backend="numpy") + + +def test_string_inference(tmp_path): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}) + df.to_orc(path) + with pd.option_context("future.infer_string", True): + result = read_orc(path) + expected = pd.DataFrame( + data={"a": ["x", "y"]}, + dtype="string[pyarrow_numpy]", + columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b94177eedb20d9e5c2ae354006a2b7d8a2b42d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py @@ -0,0 +1,1424 @@ +""" test parquet compat """ +import datetime +from decimal import Decimal +from io import BytesIO +import os +import pathlib + +import numpy as np +import pytest + +from pandas._config import using_copy_on_write +from pandas._config.config import _get_option + +from pandas.compat import is_platform_windows +from pandas.compat.pyarrow import ( + pa_version_under11p0, + pa_version_under13p0, + pa_version_under15p0, +) + +import pandas as pd +import pandas._testing as tm +from pandas.util.version import Version + +from pandas.io.parquet import ( + FastParquetImpl, + PyArrowImpl, + get_engine, + read_parquet, + to_parquet, +) + +try: + import pyarrow + + _HAVE_PYARROW = True +except ImportError: + _HAVE_PYARROW = False + +try: + import fastparquet + + _HAVE_FASTPARQUET = True +except ImportError: + _HAVE_FASTPARQUET = False + + +# TODO(ArrayManager) fastparquet relies on BlockManager internals + +pytestmark = [ + pytest.mark.filterwarnings("ignore:DataFrame._data is deprecated:FutureWarning"), + pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" + ), +] + + +# setup engines & skips +@pytest.fixture( + params=[ + pytest.param( + "fastparquet", + marks=pytest.mark.skipif( + not _HAVE_FASTPARQUET + or _get_option("mode.data_manager", silent=True) == "array", + reason="fastparquet is not installed or ArrayManager is used", + ), + ), + pytest.param( + "pyarrow", + marks=pytest.mark.skipif( + not _HAVE_PYARROW, reason="pyarrow is not installed" + ), + ), + ] +) +def engine(request): + return request.param + + +@pytest.fixture +def pa(): + if not _HAVE_PYARROW: + pytest.skip("pyarrow is not installed") + return "pyarrow" + + +@pytest.fixture +def fp(): + if not _HAVE_FASTPARQUET: + pytest.skip("fastparquet is not installed") + elif _get_option("mode.data_manager", silent=True) == "array": + pytest.skip("ArrayManager is not supported with fastparquet") + return "fastparquet" + + +@pytest.fixture +def df_compat(): + return pd.DataFrame({"A": [1, 2, 3], "B": "foo"}) + + +@pytest.fixture +def df_cross_compat(): + df = pd.DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + # 'c': np.arange(3, 6).astype('u1'), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("20130101", periods=3), + # 'g': pd.date_range('20130101', periods=3, + # tz='US/Eastern'), + # 'h': pd.date_range('20130101', periods=3, freq='ns') + } + ) + return df + + +@pytest.fixture +def df_full(): + return pd.DataFrame( + { + "string": list("abc"), + 
"string_with_nan": ["a", np.nan, "c"], + "string_with_none": ["a", None, "c"], + "bytes": [b"foo", b"bar", b"baz"], + "unicode": ["foo", "bar", "baz"], + "int": list(range(1, 4)), + "uint": np.arange(3, 6).astype("u1"), + "float": np.arange(4.0, 7.0, dtype="float64"), + "float_with_nan": [2.0, np.nan, 3.0], + "bool": [True, False, True], + "datetime": pd.date_range("20130101", periods=3), + "datetime_with_nat": [ + pd.Timestamp("20130101"), + pd.NaT, + pd.Timestamp("20130103"), + ], + } + ) + + +@pytest.fixture( + params=[ + datetime.datetime.now(datetime.timezone.utc), + datetime.datetime.now(datetime.timezone.min), + datetime.datetime.now(datetime.timezone.max), + datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"), + ] +) +def timezone_aware_date_list(request): + return request.param + + +def check_round_trip( + df, + engine=None, + path=None, + write_kwargs=None, + read_kwargs=None, + expected=None, + check_names=True, + check_like=False, + check_dtype=True, + repeat=2, +): + """Verify parquet serializer and deserializer produce the same results. + + Performs a pandas to disk and disk to pandas round trip, + then compares the 2 resulting DataFrames to verify equality. + + Parameters + ---------- + df: Dataframe + engine: str, optional + 'pyarrow' or 'fastparquet' + path: str, optional + write_kwargs: dict of str:str, optional + read_kwargs: dict of str:str, optional + expected: DataFrame, optional + Expected deserialization result, otherwise will be equal to `df` + check_names: list of str, optional + Closed set of column names to be compared + check_like: bool, optional + If True, ignore the order of index & columns. + repeat: int, optional + How many times to repeat the test + """ + write_kwargs = write_kwargs or {"compression": None} + read_kwargs = read_kwargs or {} + + if expected is None: + expected = df + + if engine: + write_kwargs["engine"] = engine + read_kwargs["engine"] = engine + + def compare(repeat): + for _ in range(repeat): + df.to_parquet(path, **write_kwargs) + actual = read_parquet(path, **read_kwargs) + + if "string_with_nan" in expected: + expected.loc[1, "string_with_nan"] = None + tm.assert_frame_equal( + expected, + actual, + check_names=check_names, + check_like=check_like, + check_dtype=check_dtype, + ) + + if path is None: + with tm.ensure_clean() as path: + compare(repeat) + else: + compare(repeat) + + +def check_partition_names(path, expected): + """Check partitions of a parquet file are as expected. + + Parameters + ---------- + path: str + Path of the dataset. + expected: iterable of str + Expected partition names. 
+ """ + import pyarrow.dataset as ds + + dataset = ds.dataset(path, partitioning="hive") + assert dataset.partitioning.schema.names == expected + + +def test_invalid_engine(df_compat): + msg = "engine must be one of 'pyarrow', 'fastparquet'" + with pytest.raises(ValueError, match=msg): + check_round_trip(df_compat, "foo", "bar") + + +def test_options_py(df_compat, pa): + # use the set option + + with pd.option_context("io.parquet.engine", "pyarrow"): + check_round_trip(df_compat) + + +def test_options_fp(df_compat, fp): + # use the set option + + with pd.option_context("io.parquet.engine", "fastparquet"): + check_round_trip(df_compat) + + +def test_options_auto(df_compat, fp, pa): + # use the set option + + with pd.option_context("io.parquet.engine", "auto"): + check_round_trip(df_compat) + + +def test_options_get_engine(fp, pa): + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "pyarrow"): + assert isinstance(get_engine("auto"), PyArrowImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "fastparquet"): + assert isinstance(get_engine("auto"), FastParquetImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + with pd.option_context("io.parquet.engine", "auto"): + assert isinstance(get_engine("auto"), PyArrowImpl) + assert isinstance(get_engine("pyarrow"), PyArrowImpl) + assert isinstance(get_engine("fastparquet"), FastParquetImpl) + + +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. are installed but bad version + from pandas.compat._optional import VERSIONS + + # Do we have engines installed, but a bad version of them? + pa_min_ver = VERSIONS.get("pyarrow") + fp_min_ver = VERSIONS.get("fastparquet") + have_pa_bad_version = ( + False + if not _HAVE_PYARROW + else Version(pyarrow.__version__) < Version(pa_min_ver) + ) + have_fp_bad_version = ( + False + if not _HAVE_FASTPARQUET + else Version(fastparquet.__version__) < Version(fp_min_ver) + ) + # Do we have usable engines installed? + have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version + have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version + + if not have_usable_pa and not have_usable_fp: + # No usable engines found. + if have_pa_bad_version: + match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + + if have_fp_bad_version: + match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .fastparquet." 
+ with pytest.raises(ImportError, match=match): + get_engine("auto") + + +def test_cross_engine_pa_fp(df_cross_compat, pa, fp): + # cross-compat with differing reading/writing engines + + df = df_cross_compat + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa, compression=None) + + result = read_parquet(path, engine=fp) + tm.assert_frame_equal(result, df) + + result = read_parquet(path, engine=fp, columns=["a", "d"]) + tm.assert_frame_equal(result, df[["a", "d"]]) + + +def test_cross_engine_fp_pa(df_cross_compat, pa, fp): + # cross-compat with differing reading/writing engines + df = df_cross_compat + with tm.ensure_clean() as path: + df.to_parquet(path, engine=fp, compression=None) + + result = read_parquet(path, engine=pa) + tm.assert_frame_equal(result, df) + + result = read_parquet(path, engine=pa, columns=["a", "d"]) + tm.assert_frame_equal(result, df[["a", "d"]]) + + +def test_parquet_pos_args_deprecation(engine): + # GH-54229 + df = pd.DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_parquet except for the " + r"argument 'path' will be keyword-only." + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + df.to_parquet(path, engine) + + +class Base: + def check_error_on_write(self, df, engine, exc, err_msg): + # check that we are raising the exception on writing + with tm.ensure_clean() as path: + with pytest.raises(exc, match=err_msg): + to_parquet(df, path, engine, compression=None) + + def check_external_error_on_write(self, df, engine, exc): + # check that an external library is raising the exception on writing + with tm.ensure_clean() as path: + with tm.external_error_raised(exc): + to_parquet(df, path, engine, compression=None) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine): + if engine != "auto": + pytest.importorskip(engine) + with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f: + httpserver.serve_content(content=f.read()) + df = read_parquet(httpserver.url) + tm.assert_frame_equal(df, df_compat) + + +class TestBasic(Base): + def test_error(self, engine): + for obj in [ + pd.Series([1, 2, 3]), + 1, + "foo", + pd.Timestamp("20130101"), + np.array([1, 2, 3]), + ]: + msg = "to_parquet only supports IO with DataFrames" + self.check_error_on_write(obj, engine, ValueError, msg) + + def test_columns_dtypes(self, engine): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + # unicode + df.columns = ["foo", "bar"] + check_round_trip(df, engine) + + @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"]) + def test_compression(self, engine, compression): + df = pd.DataFrame({"A": [1, 2, 3]}) + check_round_trip(df, engine, write_kwargs={"compression": compression}) + + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + expected = pd.DataFrame({"string": list("abc")}) + check_round_trip( + df, engine, expected=expected, read_kwargs={"columns": ["string"]} + ) + + def test_read_filters(self, engine, tmp_path): + df = pd.DataFrame( + { + "int": list(range(4)), + "part": list("aabb"), + } + ) + + expected = pd.DataFrame({"int": [0, 1]}) + check_round_trip( + df, + engine, + path=tmp_path, + expected=expected, + write_kwargs={"partition_cols": ["part"]}, + read_kwargs={"filters": [("part", "==", 
"a")], "columns": ["int"]}, + repeat=1, + ) + + def test_write_index(self, engine, using_copy_on_write, request): + check_names = engine != "fastparquet" + if using_copy_on_write and engine == "fastparquet": + request.applymarker( + pytest.mark.xfail(reason="fastparquet write into index") + ) + + df = pd.DataFrame({"A": [1, 2, 3]}) + check_round_trip(df, engine) + + indexes = [ + [2, 3, 4], + pd.date_range("20130101", periods=3), + list("abc"), + [1, 3, 4], + ] + # non-default index + for index in indexes: + df.index = index + if isinstance(index, pd.DatetimeIndex): + df.index = df.index._with_freq(None) # freq doesn't round-trip + check_round_trip(df, engine, check_names=check_names) + + # index with meta-data + df.index = [0, 1, 2] + df.index.name = "foo" + check_round_trip(df, engine) + + def test_write_multiindex(self, pa): + # Not supported in fastparquet as of 0.1.3 or older pyarrow version + engine = pa + + df = pd.DataFrame({"A": [1, 2, 3]}) + index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]) + df.index = index + check_round_trip(df, engine) + + def test_multiindex_with_columns(self, pa): + engine = pa + dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS") + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((2 * len(dates), 3)), + columns=list("ABC"), + ) + index1 = pd.MultiIndex.from_product( + [["Level1", "Level2"], dates], names=["level", "date"] + ) + index2 = index1.copy(names=None) + for index in [index1, index2]: + df.index = index + + check_round_trip(df, engine) + check_round_trip( + df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]] + ) + + def test_write_ignoring_index(self, engine): + # ENH 20768 + # Ensure index=False omits the index from the written Parquet file. + df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]}) + + write_kwargs = {"compression": None, "index": False} + + # Because we're dropping the index, we expect the loaded dataframe to + # have the default integer index. + expected = df.reset_index(drop=True) + + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + # Ignore custom index + df = pd.DataFrame( + {"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"] + ) + + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + # Ignore multi-indexes as well. + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + df = pd.DataFrame( + {"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays + ) + + expected = df.reset_index(drop=True) + check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected) + + def test_write_column_multiindex(self, engine): + # Not able to write column multi-indexes with non-string column names. 
+ mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]) + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=mi_columns + ) + + if engine == "fastparquet": + self.check_error_on_write( + df, engine, TypeError, "Column name must be a string" + ) + elif engine == "pyarrow": + check_round_trip(df, engine) + + def test_write_column_multiindex_nonstring(self, engine): + # GH #34777 + + # Not able to write column multi-indexes with non-string column names + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + [1, 2, 1, 2, 1, 2, 1, 2], + ] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 8)), columns=arrays + ) + df.columns.names = ["Level1", "Level2"] + if engine == "fastparquet": + self.check_error_on_write(df, engine, ValueError, "Column name") + elif engine == "pyarrow": + check_round_trip(df, engine) + + def test_write_column_multiindex_string(self, pa): + # GH #34777 + # Not supported in fastparquet as of 0.1.3 + engine = pa + + # Write column multi-indexes with string column names + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 8)), columns=arrays + ) + df.columns.names = ["ColLevel1", "ColLevel2"] + + check_round_trip(df, engine) + + def test_write_column_index_string(self, pa): + # GH #34777 + # Not supported in fastparquet as of 0.1.3 + engine = pa + + # Write column indexes with string column names + arrays = ["bar", "baz", "foo", "qux"] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), columns=arrays + ) + df.columns.name = "StringCol" + + check_round_trip(df, engine) + + def test_write_column_index_nonstring(self, engine): + # GH #34777 + + # Write column indexes with string column names + arrays = [1, 2, 3, 4] + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), columns=arrays + ) + df.columns.name = "NonStringCol" + if engine == "fastparquet": + self.check_error_on_write( + df, engine, TypeError, "Column name must be a string" + ) + else: + check_round_trip(df, engine) + + def test_dtype_backend(self, engine, request): + pq = pytest.importorskip("pyarrow.parquet") + + if engine == "fastparquet": + # We are manually disabling fastparquet's + # nullable dtype support pending discussion + mark = pytest.mark.xfail( + reason="Fastparquet nullable dtype support is disabled" + ) + request.applymarker(mark) + + table = pyarrow.table( + { + "a": pyarrow.array([1, 2, 3, None], "int64"), + "b": pyarrow.array([1, 2, 3, None], "uint8"), + "c": pyarrow.array(["a", "b", "c", None]), + "d": pyarrow.array([True, False, True, None]), + # Test that nullable dtypes used even in absence of nulls + "e": pyarrow.array([1, 2, 3, 4], "int64"), + # GH 45694 + "f": pyarrow.array([1.0, 2.0, 3.0, None], "float32"), + "g": pyarrow.array([1.0, 2.0, 3.0, None], "float64"), + } + ) + with tm.ensure_clean() as path: + # write manually with pyarrow to write integers + pq.write_table(table, path) + result1 = read_parquet(path, engine=engine) + result2 = read_parquet(path, engine=engine, dtype_backend="numpy_nullable") + + assert result1["a"].dtype == np.dtype("float64") + expected = pd.DataFrame( + { + "a": pd.array([1, 2, 3, None], dtype="Int64"), + "b": pd.array([1, 2, 3, None], dtype="UInt8"), + "c": pd.array(["a", "b", "c", None], dtype="string"), + "d": pd.array([True, False, True, None], dtype="boolean"), + "e": pd.array([1, 2, 3, 
4], dtype="Int64"), + "f": pd.array([1.0, 2.0, 3.0, None], dtype="Float32"), + "g": pd.array([1.0, 2.0, 3.0, None], dtype="Float64"), + } + ) + if engine == "fastparquet": + # Fastparquet doesn't support string columns yet + # Only int and boolean + result2 = result2.drop("c", axis=1) + expected = expected.drop("c", axis=1) + tm.assert_frame_equal(result2, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "Int64", + "UInt8", + "boolean", + "object", + "datetime64[ns, UTC]", + "float", + "period[D]", + "Float64", + "string", + ], + ) + def test_read_empty_array(self, pa, dtype): + # GH #41241 + df = pd.DataFrame( + { + "value": pd.array([], dtype=dtype), + } + ) + # GH 45694 + expected = None + if dtype == "float": + expected = pd.DataFrame( + { + "value": pd.array([], dtype="Float64"), + } + ) + check_round_trip( + df, pa, read_kwargs={"dtype_backend": "numpy_nullable"}, expected=expected + ) + + +class TestParquetPyArrow(Base): + def test_basic(self, pa, df_full): + df = df_full + + # additional supported types for pyarrow + dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["bool_with_none"] = [True, None, True] + + check_round_trip(df, pa) + + def test_basic_subset_columns(self, pa, df_full): + # GH18628 + + df = df_full + # additional supported types for pyarrow + df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + + check_round_trip( + df, + pa, + expected=df[["string", "int"]], + read_kwargs={"columns": ["string", "int"]}, + ) + + def test_to_bytes_without_path_or_buf_provided(self, pa, df_full): + # GH 37105 + buf_bytes = df_full.to_parquet(engine=pa) + assert isinstance(buf_bytes, bytes) + + buf_stream = BytesIO(buf_bytes) + res = read_parquet(buf_stream) + + expected = df_full.copy() + expected.loc[1, "string_with_nan"] = None + tm.assert_frame_equal(res, expected) + + def test_duplicate_columns(self, pa): + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + self.check_error_on_write(df, pa, ValueError, "Duplicate column names found") + + def test_timedelta(self, pa): + df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)}) + check_round_trip(df, pa) + + def test_unsupported(self, pa): + # mixed python objects + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + # pyarrow 0.11 raises ArrowTypeError + # older pyarrows raise ArrowInvalid + self.check_external_error_on_write(df, pa, pyarrow.ArrowException) + + def test_unsupported_float16(self, pa): + # #44847, #44914 + # Not able to write float 16 column using pyarrow. + data = np.arange(2, 10, dtype=np.float16) + df = pd.DataFrame(data=data, columns=["fp16"]) + if pa_version_under15p0: + self.check_external_error_on_write(df, pa, pyarrow.ArrowException) + else: + check_round_trip(df, pa) + + @pytest.mark.xfail( + is_platform_windows(), + reason=( + "PyArrow does not cleanup of partial files dumps when unsupported " + "dtypes are passed to_parquet function in windows" + ), + ) + @pytest.mark.skipif(not pa_version_under15p0, reason="float16 works on 15") + @pytest.mark.parametrize("path_type", [str, pathlib.Path]) + def test_unsupported_float16_cleanup(self, pa, path_type): + # #44847, #44914 + # Not able to write float 16 column using pyarrow. 
+ # Tests cleanup by pyarrow in case of an error + data = np.arange(2, 10, dtype=np.float16) + df = pd.DataFrame(data=data, columns=["fp16"]) + + with tm.ensure_clean() as path_str: + path = path_type(path_str) + with tm.external_error_raised(pyarrow.ArrowException): + df.to_parquet(path=path, engine=pa) + assert not os.path.isfile(path) + + def test_categorical(self, pa): + # supported in >= 0.7.0 + df = pd.DataFrame() + df["a"] = pd.Categorical(list("abcdef")) + + # test for null, out-of-order values, and unobserved category + df["b"] = pd.Categorical( + ["bar", "foo", "foo", "bar", None, "bar"], + dtype=pd.CategoricalDtype(["foo", "bar", "baz"]), + ) + + # test for ordered flag + df["c"] = pd.Categorical( + ["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True + ) + + check_round_trip(df, pa) + + @pytest.mark.single_cpu + def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so): + s3fs = pytest.importorskip("s3fs") + s3 = s3fs.S3FileSystem(**s3so) + kw = {"filesystem": s3} + check_round_trip( + df_compat, + pa, + path=f"{s3_public_bucket.name}/pyarrow.parquet", + read_kwargs=kw, + write_kwargs=kw, + ) + + @pytest.mark.single_cpu + def test_s3_roundtrip(self, df_compat, s3_public_bucket, pa, s3so): + # GH #19134 + s3so = {"storage_options": s3so} + check_round_trip( + df_compat, + pa, + path=f"s3://{s3_public_bucket.name}/pyarrow.parquet", + read_kwargs=s3so, + write_kwargs=s3so, + ) + + @pytest.mark.single_cpu + @pytest.mark.parametrize( + "partition_col", + [ + ["A"], + [], + ], + ) + def test_s3_roundtrip_for_dir( + self, df_compat, s3_public_bucket, pa, partition_col, s3so + ): + pytest.importorskip("s3fs") + # GH #26388 + expected_df = df_compat.copy() + + # GH #35791 + if partition_col: + expected_df = expected_df.astype(dict.fromkeys(partition_col, np.int32)) + partition_col_type = "category" + + expected_df[partition_col] = expected_df[partition_col].astype( + partition_col_type + ) + + check_round_trip( + df_compat, + pa, + expected=expected_df, + path=f"s3://{s3_public_bucket.name}/parquet_dir", + read_kwargs={"storage_options": s3so}, + write_kwargs={ + "partition_cols": partition_col, + "compression": None, + "storage_options": s3so, + }, + check_like=True, + repeat=1, + ) + + def test_read_file_like_obj_support(self, df_compat): + pytest.importorskip("pyarrow") + buffer = BytesIO() + df_compat.to_parquet(buffer) + df_from_buf = read_parquet(buffer) + tm.assert_frame_equal(df_compat, df_from_buf) + + def test_expand_user(self, df_compat, monkeypatch): + pytest.importorskip("pyarrow") + monkeypatch.setenv("HOME", "TestingUser") + monkeypatch.setenv("USERPROFILE", "TestingUser") + with pytest.raises(OSError, match=r".*TestingUser.*"): + read_parquet("~/file.parquet") + with pytest.raises(OSError, match=r".*TestingUser.*"): + df_compat.to_parquet("~/file.parquet") + + def test_partition_cols_supported(self, tmp_path, pa, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None) + check_partition_names(tmp_path, partition_cols) + assert read_parquet(tmp_path).shape == df.shape + + def test_partition_cols_string(self, tmp_path, pa, df_full): + # GH #27117 + partition_cols = "bool" + partition_cols_list = [partition_cols] + df = df_full + df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None) + check_partition_names(tmp_path, partition_cols_list) + assert read_parquet(tmp_path).shape == df.shape + + @pytest.mark.parametrize( + 
"path_type", [str, lambda x: x], ids=["string", "pathlib.Path"] + ) + def test_partition_cols_pathlib(self, tmp_path, pa, df_compat, path_type): + # GH 35902 + + partition_cols = "B" + partition_cols_list = [partition_cols] + df = df_compat + + path = path_type(tmp_path) + df.to_parquet(path, partition_cols=partition_cols_list) + assert read_parquet(path).shape == df.shape + + def test_empty_dataframe(self, pa): + # GH #27339 + df = pd.DataFrame(index=[], columns=[]) + check_round_trip(df, pa) + + def test_write_with_schema(self, pa): + import pyarrow + + df = pd.DataFrame({"x": [0, 1]}) + schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())]) + out_df = df.astype(bool) + check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df) + + def test_additional_extension_arrays(self, pa): + # test additional ExtensionArrays that are supported through the + # __arrow_array__ protocol + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype="Int64"), + "b": pd.Series([1, 2, 3], dtype="UInt32"), + "c": pd.Series(["a", None, "c"], dtype="string"), + } + ) + check_round_trip(df, pa) + + df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")}) + check_round_trip(df, pa) + + def test_pyarrow_backed_string_array(self, pa, string_storage): + # test ArrowStringArray supported through the __arrow_array__ protocol + pytest.importorskip("pyarrow") + df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")}) + with pd.option_context("string_storage", string_storage): + check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]")) + + def test_additional_extension_types(self, pa): + # test additional ExtensionArrays that are supported through the + # __arrow_array__ protocol + by defining a custom ExtensionType + pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]), + "d": pd.period_range("2012-01-01", periods=3, freq="D"), + # GH-45881 issue with interval with datetime64[ns] subtype + "e": pd.IntervalIndex.from_breaks( + pd.date_range("2012-01-01", periods=4, freq="D") + ), + } + ) + check_round_trip(df, pa) + + def test_timestamp_nanoseconds(self, pa): + # with version 2.6, pyarrow defaults to writing the nanoseconds, so + # this should work without error + # Note in previous pyarrows(<7.0.0), only the pseudo-version 2.0 was available + ver = "2.6" + df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1ns", periods=10)}) + check_round_trip(df, pa, write_kwargs={"version": ver}) + + def test_timezone_aware_index(self, request, pa, timezone_aware_date_list): + if timezone_aware_date_list.tzinfo != datetime.timezone.utc: + request.applymarker( + pytest.mark.xfail( + reason="temporary skip this test until it is properly resolved: " + "https://github.com/pandas-dev/pandas/issues/37286" + ) + ) + idx = 5 * [timezone_aware_date_list] + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + # see gh-36004 + # compare time(zone) values only, skip their class: + # pyarrow always creates fixed offset timezones using pytz.FixedOffset() + # even if it was datetime.timezone() originally + # + # technically they are the same: + # they both implement datetime.tzinfo + # they both wrap datetime.timedelta() + # this use-case sets the resolution to 1 minute + check_round_trip(df, pa, check_dtype=False) + + def test_filter_row_groups(self, pa): + # https://github.com/pandas-dev/pandas/issues/26551 + pytest.importorskip("pyarrow") + df = 
pd.DataFrame({"a": list(range(3))}) + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa) + result = read_parquet(path, pa, filters=[("a", "==", 0)]) + assert len(result) == 1 + + def test_read_parquet_manager(self, pa, using_array_manager): + # ensure that read_parquet honors the pandas.options.mode.data_manager option + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"] + ) + + with tm.ensure_clean() as path: + df.to_parquet(path, engine=pa) + result = read_parquet(path, pa) + if using_array_manager: + assert isinstance(result._mgr, pd.core.internals.ArrayManager) + else: + assert isinstance(result._mgr, pd.core.internals.BlockManager) + + def test_read_dtype_backend_pyarrow_config(self, pa, df_full): + import pyarrow + + df = df_full + + # additional supported types for pyarrow + dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["bool_with_none"] = [True, None, True] + + pa_table = pyarrow.Table.from_pandas(df) + expected = pa_table.to_pandas(types_mapper=pd.ArrowDtype) + if pa_version_under13p0: + # pyarrow infers datetimes as us instead of ns + expected["datetime"] = expected["datetime"].astype("timestamp[us][pyarrow]") + expected["datetime_with_nat"] = expected["datetime_with_nat"].astype( + "timestamp[us][pyarrow]" + ) + expected["datetime_tz"] = expected["datetime_tz"].astype( + pd.ArrowDtype(pyarrow.timestamp(unit="us", tz="Europe/Brussels")) + ) + + check_round_trip( + df, + engine=pa, + read_kwargs={"dtype_backend": "pyarrow"}, + expected=expected, + ) + + def test_read_dtype_backend_pyarrow_config_index(self, pa): + df = pd.DataFrame( + {"a": [1, 2]}, index=pd.Index([3, 4], name="test"), dtype="int64[pyarrow]" + ) + expected = df.copy() + import pyarrow + + if Version(pyarrow.__version__) > Version("11.0.0"): + expected.index = expected.index.astype("int64[pyarrow]") + check_round_trip( + df, + engine=pa, + read_kwargs={"dtype_backend": "pyarrow"}, + expected=expected, + ) + + def test_columns_dtypes_not_invalid(self, pa): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + # numeric + df.columns = [0, 1] + check_round_trip(df, pa) + + # bytes + df.columns = [b"foo", b"bar"] + with pytest.raises(NotImplementedError, match="|S3"): + # Bytes fails on read_parquet + check_round_trip(df, pa) + + # python object + df.columns = [ + datetime.datetime(2011, 1, 1, 0, 0), + datetime.datetime(2011, 1, 1, 1, 1), + ] + check_round_trip(df, pa) + + def test_empty_columns(self, pa): + # GH 52034 + df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + check_round_trip(df, pa) + + def test_df_attrs_persistence(self, tmp_path, pa): + path = tmp_path / "test_df_metadata.p" + df = pd.DataFrame(data={1: [1]}) + df.attrs = {"test_attribute": 1} + df.to_parquet(path, engine=pa) + new_df = read_parquet(path, engine=pa) + assert new_df.attrs == df.attrs + + def test_string_inference(self, tmp_path, pa): + # GH#54431 + path = tmp_path / "test_string_inference.p" + df = pd.DataFrame(data={"a": ["x", "y"]}, index=["a", "b"]) + df.to_parquet(path, engine="pyarrow") + with pd.option_context("future.infer_string", True): + result = read_parquet(path, engine="pyarrow") + expected = pd.DataFrame( + data={"a": ["x", "y"]}, + dtype="string[pyarrow_numpy]", + index=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.skipif(pa_version_under11p0, 
reason="not supported before 11.0") + def test_roundtrip_decimal(self, tmp_path, pa): + # GH#54768 + import pyarrow as pa + + path = tmp_path / "decimal.p" + df = pd.DataFrame({"a": [Decimal("123.00")]}, dtype="string[pyarrow]") + df.to_parquet(path, schema=pa.schema([("a", pa.decimal128(5))])) + result = read_parquet(path) + expected = pd.DataFrame({"a": ["123"]}, dtype="string[python]") + tm.assert_frame_equal(result, expected) + + def test_infer_string_large_string_type(self, tmp_path, pa): + # GH#54798 + import pyarrow as pa + import pyarrow.parquet as pq + + path = tmp_path / "large_string.p" + + table = pa.table({"a": pa.array([None, "b", "c"], pa.large_string())}) + pq.write_table(table, path) + + with pd.option_context("future.infer_string", True): + result = read_parquet(path) + expected = pd.DataFrame( + data={"a": [None, "b", "c"]}, + dtype="string[pyarrow_numpy]", + columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + # NOTE: this test is not run by default, because it requires a lot of memory (>5GB) + # @pytest.mark.slow + # def test_string_column_above_2GB(self, tmp_path, pa): + # # https://github.com/pandas-dev/pandas/issues/55606 + # # above 2GB of string data + # v1 = b"x" * 100000000 + # v2 = b"x" * 147483646 + # df = pd.DataFrame({"strings": [v1] * 20 + [v2] + ["x"] * 20}, dtype="string") + # df.to_parquet(tmp_path / "test.parquet") + # result = read_parquet(tmp_path / "test.parquet") + # assert result["strings"].dtype == "string" + + +class TestParquetFastParquet(Base): + def test_basic(self, fp, df_full): + df = df_full + + dti = pd.date_range("20130101", periods=3, tz="US/Eastern") + dti = dti._with_freq(None) # freq doesn't round-trip + df["datetime_tz"] = dti + df["timedelta"] = pd.timedelta_range("1 day", periods=3) + check_round_trip(df, fp) + + def test_columns_dtypes_invalid(self, fp): + df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))}) + + err = TypeError + msg = "Column name must be a string" + + # numeric + df.columns = [0, 1] + self.check_error_on_write(df, fp, err, msg) + + # bytes + df.columns = [b"foo", b"bar"] + self.check_error_on_write(df, fp, err, msg) + + # python object + df.columns = [ + datetime.datetime(2011, 1, 1, 0, 0), + datetime.datetime(2011, 1, 1, 1, 1), + ] + self.check_error_on_write(df, fp, err, msg) + + def test_duplicate_columns(self, fp): + # not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() + msg = "Cannot create parquet dataset with duplicate column names" + self.check_error_on_write(df, fp, ValueError, msg) + + def test_bool_with_none(self, fp): + df = pd.DataFrame({"a": [True, None, False]}) + expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16") + # Fastparquet bug in 0.7.1 makes it so that this dtype becomes + # float64 + check_round_trip(df, fp, expected=expected, check_dtype=False) + + def test_unsupported(self, fp): + # period + df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)}) + # error from fastparquet -> don't check exact error message + self.check_error_on_write(df, fp, ValueError, None) + + # mixed + df = pd.DataFrame({"a": ["a", 1, 2.0]}) + msg = "Can't infer object conversion type" + self.check_error_on_write(df, fp, ValueError, msg) + + def test_categorical(self, fp): + df = pd.DataFrame({"a": pd.Categorical(list("abc"))}) + check_round_trip(df, fp) + + def test_filter_row_groups(self, fp): + d = {"a": list(range(3))} + df = pd.DataFrame(d) 
+ with tm.ensure_clean() as path: + df.to_parquet(path, engine=fp, compression=None, row_group_offsets=1) + result = read_parquet(path, fp, filters=[("a", "==", 0)]) + assert len(result) == 1 + + @pytest.mark.single_cpu + def test_s3_roundtrip(self, df_compat, s3_public_bucket, fp, s3so): + # GH #19134 + check_round_trip( + df_compat, + fp, + path=f"s3://{s3_public_bucket.name}/fastparquet.parquet", + read_kwargs={"storage_options": s3so}, + write_kwargs={"compression": None, "storage_options": s3so}, + ) + + def test_partition_cols_supported(self, tmp_path, fp, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 2 + + def test_partition_cols_string(self, tmp_path, fp, df_full): + # GH #27117 + partition_cols = "bool" + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 1 + + def test_partition_on_supported(self, tmp_path, fp, df_full): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + df.to_parquet( + tmp_path, + engine="fastparquet", + compression=None, + partition_on=partition_cols, + ) + assert os.path.exists(tmp_path) + import fastparquet + + actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats + assert len(actual_partition_cols) == 2 + + def test_error_on_using_partition_cols_and_partition_on( + self, tmp_path, fp, df_full + ): + # GH #23283 + partition_cols = ["bool", "int"] + df = df_full + msg = ( + "Cannot use both partition_on and partition_cols. 
Use partition_cols for " + "partitioning data" + ) + with pytest.raises(ValueError, match=msg): + df.to_parquet( + tmp_path, + engine="fastparquet", + compression=None, + partition_on=partition_cols, + partition_cols=partition_cols, + ) + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_empty_dataframe(self, fp): + # GH #27339 + df = pd.DataFrame() + expected = df.copy() + check_round_trip(df, fp, expected=expected) + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_timezone_aware_index(self, fp, timezone_aware_date_list): + idx = 5 * [timezone_aware_date_list] + + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + expected = df.copy() + expected.index.name = "index" + check_round_trip(df, fp, expected=expected) + + def test_use_nullable_dtypes_not_supported(self, fp): + df = pd.DataFrame({"a": [1, 2]}) + + with tm.ensure_clean() as path: + df.to_parquet(path) + with pytest.raises(ValueError, match="not supported for the fastparquet"): + with tm.assert_produces_warning(FutureWarning): + read_parquet(path, engine="fastparquet", use_nullable_dtypes=True) + with pytest.raises(ValueError, match="not supported for the fastparquet"): + read_parquet(path, engine="fastparquet", dtype_backend="pyarrow") + + def test_close_file_handle_on_read_error(self): + with tm.ensure_clean("test.parquet") as path: + pathlib.Path(path).write_bytes(b"breakit") + with pytest.raises(Exception, match=""): # Not important which exception + read_parquet(path, engine="fastparquet") + # The next line raises an error on Windows if the file is still open + pathlib.Path(path).unlink(missing_ok=False) + + def test_bytes_file_name(self, engine): + # GH#48944 + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean("test.parquet") as path: + with open(path.encode(), "wb") as f: + df.to_parquet(f) + + result = read_parquet(path, engine=engine) + tm.assert_frame_equal(result, df) + + def test_filesystem_notimplemented(self): + pytest.importorskip("fastparquet") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + NotImplementedError, match="filesystem is not implemented" + ): + df.to_parquet(path, engine="fastparquet", filesystem="foo") + + with tm.ensure_clean() as path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + NotImplementedError, match="filesystem is not implemented" + ): + read_parquet(path, engine="fastparquet", filesystem="foo") + + def test_invalid_filesystem(self): + pytest.importorskip("pyarrow") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + ValueError, match="filesystem must be a pyarrow or fsspec FileSystem" + ): + df.to_parquet(path, engine="pyarrow", filesystem="foo") + + with tm.ensure_clean() as path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + ValueError, match="filesystem must be a pyarrow or fsspec FileSystem" + ): + read_parquet(path, engine="pyarrow", filesystem="foo") + + def test_unsupported_pa_filesystem_storage_options(self): + pa_fs = pytest.importorskip("pyarrow.fs") + df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]}) + with tm.ensure_clean() as path: + with pytest.raises( + NotImplementedError, + match="storage_options not supported with a pyarrow FileSystem.", + ): + df.to_parquet( + path, + engine="pyarrow", + filesystem=pa_fs.LocalFileSystem(), + storage_options={"foo": "bar"}, + ) + + with tm.ensure_clean() as 
path: + pathlib.Path(path).write_bytes(b"foo") + with pytest.raises( + NotImplementedError, + match="storage_options not supported with a pyarrow FileSystem.", + ): + read_parquet( + path, + engine="pyarrow", + filesystem=pa_fs.LocalFileSystem(), + storage_options={"foo": "bar"}, + ) + + def test_invalid_dtype_backend(self, engine): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + df = pd.DataFrame({"int": list(range(1, 4))}) + with tm.ensure_clean("tmp.parquet") as path: + df.to_parquet(path) + with pytest.raises(ValueError, match=msg): + read_parquet(path, dtype_backend="numpy") + + @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index") + def test_empty_columns(self, fp): + # GH 52034 + df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + expected = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name")) + check_round_trip(df, fp, expected=expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..4f3993a038197e52c7f21fb4f4d40425e897600f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py @@ -0,0 +1,652 @@ +""" +manage legacy pickle tests + +How to add pickle tests: + +1. Install pandas version intended to output the pickle. + +2. Execute "generate_legacy_storage_files.py" to create the pickle. +$ python generate_legacy_storage_files.py pickle + +3. Move the created pickle to "data/legacy_pickle/" directory. +""" +from __future__ import annotations + +from array import array +import bz2 +import datetime +import functools +from functools import partial +import gzip +import io +import os +from pathlib import Path +import pickle +import shutil +import tarfile +from typing import Any +import uuid +import zipfile + +import numpy as np +import pytest + +from pandas.compat import ( + get_lzma_file, + is_platform_little_endian, +) +from pandas.compat._optional import import_optional_dependency +from pandas.compat.compressors import flatten_buffer +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + period_range, +) +import pandas._testing as tm +from pandas.tests.io.generate_legacy_storage_files import create_pickle_data + +import pandas.io.common as icom +from pandas.tseries.offsets import ( + Day, + MonthEnd, +) + + +# --------------------- +# comparison functions +# --------------------- +def compare_element(result, expected, typ): + if isinstance(expected, Index): + tm.assert_index_equal(expected, result) + return + + if typ.startswith("sp_"): + tm.assert_equal(result, expected) + elif typ == "timestamp": + if expected is pd.NaT: + assert result is pd.NaT + else: + assert result == expected + else: + comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal) + comparator(result, expected) + + +# --------------------- +# tests +# --------------------- + + +@pytest.mark.parametrize( + "data", + [ + b"123", + b"123456", + bytearray(b"123"), + memoryview(b"123"), + pickle.PickleBuffer(b"123"), + array("I", [1, 2, 3]), + memoryview(b"123456").cast("B", (3, 2)), + memoryview(b"123456").cast("B", (3, 2))[::2], + np.arange(12).reshape((3, 4), order="C"), + np.arange(12).reshape((3, 4), order="F"), + np.arange(12).reshape((3, 4), order="C")[:, ::2], + ], +) +def test_flatten_buffer(data): + 
result = flatten_buffer(data) + expected = memoryview(data).tobytes("A") + assert result == expected + if isinstance(data, (bytes, bytearray)): + assert result is data + elif isinstance(result, memoryview): + assert result.ndim == 1 + assert result.format == "B" + assert result.contiguous + assert result.shape == (result.nbytes,) + + +def test_pickles(datapath): + if not is_platform_little_endian(): + pytest.skip("known failure on non-little endian") + + # For loop for compat with --strict-data-files + for legacy_pickle in Path(__file__).parent.glob("data/legacy_pickle/*/*.p*kl*"): + legacy_pickle = datapath(legacy_pickle) + + data = pd.read_pickle(legacy_pickle) + + for typ, dv in data.items(): + for dt, result in dv.items(): + expected = data[typ][dt] + + if typ == "series" and dt == "ts": + # GH 7748 + tm.assert_series_equal(result, expected) + assert result.index.freq == expected.index.freq + assert not result.index.freq.normalize + tm.assert_series_equal(result > 0, expected > 0) + + # GH 9291 + freq = result.index.freq + assert freq + Day(1) == Day(2) + + res = freq + pd.Timedelta(hours=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, hours=1) + + res = freq + pd.Timedelta(nanoseconds=1) + assert isinstance(res, pd.Timedelta) + assert res == pd.Timedelta(days=1, nanoseconds=1) + elif typ == "index" and dt == "period": + tm.assert_index_equal(result, expected) + assert isinstance(result.freq, MonthEnd) + assert result.freq == MonthEnd() + assert result.freqstr == "M" + tm.assert_index_equal(result.shift(2), expected.shift(2)) + elif typ == "series" and dt in ("dt_tz", "cat"): + tm.assert_series_equal(result, expected) + elif typ == "frame" and dt in ( + "dt_mixed_tzs", + "cat_onecol", + "cat_and_float", + ): + tm.assert_frame_equal(result, expected) + else: + compare_element(result, expected, typ) + + +def python_pickler(obj, path): + with open(path, "wb") as fh: + pickle.dump(obj, fh, protocol=-1) + + +def python_unpickler(path): + with open(path, "rb") as fh: + fh.seek(0) + return pickle.load(fh) + + +def flatten(data: dict) -> list[tuple[str, Any]]: + """Flatten create_pickle_data""" + return [ + (typ, example) + for typ, examples in data.items() + for example in examples.values() + ] + + +@pytest.mark.parametrize( + "pickle_writer", + [ + pytest.param(python_pickler, id="python"), + pytest.param(pd.to_pickle, id="pandas_proto_default"), + pytest.param( + functools.partial(pd.to_pickle, protocol=pickle.HIGHEST_PROTOCOL), + id="pandas_proto_highest", + ), + pytest.param(functools.partial(pd.to_pickle, protocol=4), id="pandas_proto_4"), + pytest.param( + functools.partial(pd.to_pickle, protocol=5), + id="pandas_proto_5", + ), + ], +) +@pytest.mark.parametrize("writer", [pd.to_pickle, python_pickler]) +@pytest.mark.parametrize("typ, expected", flatten(create_pickle_data())) +def test_round_trip_current(typ, expected, pickle_writer, writer): + with tm.ensure_clean() as path: + # test writing with each pickler + pickle_writer(expected, path) + + # test reading with each unpickler + result = pd.read_pickle(path) + compare_element(result, expected, typ) + + result = python_unpickler(path) + compare_element(result, expected, typ) + + # and the same for file objects (GH 35679) + with open(path, mode="wb") as handle: + writer(expected, path) + handle.seek(0) # shouldn't close file handle + with open(path, mode="rb") as handle: + result = pd.read_pickle(handle) + handle.seek(0) # shouldn't close file handle + compare_element(result, expected, typ) + + +def 
test_pickle_path_pathlib(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle) + tm.assert_frame_equal(df, result) + + +def test_pickle_path_localpath(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_localpath(df.to_pickle, pd.read_pickle) + tm.assert_frame_equal(df, result) + + +# --------------------- +# test pickle compression +# --------------------- + + +@pytest.fixture +def get_random_path(): + return f"__{uuid.uuid4()}__.pickle" + + +class TestCompression: + _extension_to_compression = icom.extension_to_compression + + def compress_file(self, src_path, dest_path, compression): + if compression is None: + shutil.copyfile(src_path, dest_path) + return + + if compression == "gzip": + f = gzip.open(dest_path, "w") + elif compression == "bz2": + f = bz2.BZ2File(dest_path, "w") + elif compression == "zip": + with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: + f.write(src_path, os.path.basename(src_path)) + elif compression == "tar": + with open(src_path, "rb") as fh: + with tarfile.open(dest_path, mode="w") as tar: + tarinfo = tar.gettarinfo(src_path, os.path.basename(src_path)) + tar.addfile(tarinfo, fh) + elif compression == "xz": + f = get_lzma_file()(dest_path, "w") + elif compression == "zstd": + f = import_optional_dependency("zstandard").open(dest_path, "wb") + else: + msg = f"Unrecognized compression type: {compression}" + raise ValueError(msg) + + if compression not in ["zip", "tar"]: + with open(src_path, "rb") as fh: + with f: + f.write(fh.read()) + + def test_write_explicit(self, compression, get_random_path): + base = get_random_path + path1 = base + ".compressed" + path2 = base + ".raw" + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to compressed file + df.to_pickle(p1, compression=compression) + + # decompress + with tm.decompress_file(p1, compression=compression) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) + + # read decompressed file + df2 = pd.read_pickle(p2, compression=None) + + tm.assert_frame_equal(df, df2) + + @pytest.mark.parametrize("compression", ["", "None", "bad", "7z"]) + def test_write_explicit_bad(self, compression, get_random_path): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean(get_random_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(path, compression=compression) + + def test_write_infer(self, compression_ext, get_random_path): + base = get_random_path + path1 = base + compression_ext + path2 = base + ".raw" + compression = self._extension_to_compression.get(compression_ext.lower()) + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to compressed file by inferred compression method + df.to_pickle(p1) + + # 
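the write above relied on to_pickle inferring the codec from the + # suffix of p1, so the read-back below passes the matching codec from + # _extension_to_compression explicitly + #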
decompress + with tm.decompress_file(p1, compression=compression) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) + + # read decompressed file + df2 = pd.read_pickle(p2, compression=None) + + tm.assert_frame_equal(df, df2) + + def test_read_explicit(self, compression, get_random_path): + base = get_random_path + path1 = base + ".raw" + path2 = base + ".compressed" + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to uncompressed file + df.to_pickle(p1, compression=None) + + # compress + self.compress_file(p1, p2, compression=compression) + + # read compressed file + df2 = pd.read_pickle(p2, compression=compression) + tm.assert_frame_equal(df, df2) + + def test_read_infer(self, compression_ext, get_random_path): + base = get_random_path + path1 = base + ".raw" + path2 = base + compression_ext + compression = self._extension_to_compression.get(compression_ext.lower()) + + with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # write to uncompressed file + df.to_pickle(p1, compression=None) + + # compress + self.compress_file(p1, p2, compression=compression) + + # read compressed file by inferred compression method + df2 = pd.read_pickle(p2) + tm.assert_frame_equal(df, df2) + + +# --------------------- +# test pickle protocol +# --------------------- + + +class TestProtocol: + @pytest.mark.parametrize("protocol", [-1, 0, 1, 2]) + def test_read(self, protocol, get_random_path): + with tm.ensure_clean(get_random_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(path, protocol=protocol) + df2 = pd.read_pickle(path) + tm.assert_frame_equal(df, df2) + + +@pytest.mark.parametrize( + ["pickle_file", "excols"], + [ + ("test_py27.pkl", Index(["a", "b", "c"])), + ( + "test_mi_py27.pkl", + pd.MultiIndex.from_arrays([["a", "b", "c"], ["A", "B", "C"]]), + ), + ], +) +def test_unicode_decode_error(datapath, pickle_file, excols): + # pickle file written with py27, should be readable without raising + # UnicodeDecodeError, see GH#28645 and GH#31988 + path = datapath("io", "data", "pickle", pickle_file) + df = pd.read_pickle(path) + + # just test the columns are correct since the values are random + tm.assert_index_equal(df.columns, excols) + + +# --------------------- +# tests for buffer I/O +# --------------------- + + +def test_pickle_buffer_roundtrip(): + with tm.ensure_clean() as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + with open(path, "wb") as fh: + df.to_pickle(fh) + with open(path, "rb") as fh: + result = pd.read_pickle(fh) + tm.assert_frame_equal(df, result) + + +# --------------------- +# tests for URL I/O +# --------------------- + + +@pytest.mark.parametrize( + "mockurl", ["http://url.com", "ftp://test.com", "http://gzip.com"] +) +def test_pickle_generalurl_read(monkeypatch, mockurl): + def python_pickler(obj, path): + with open(path, "wb") as fh: + pickle.dump(obj, fh, protocol=-1) + + class MockReadResponse: + def __init__(self, 
path) -> None: + self.file = open(path, "rb") + if "gzip" in path: + self.headers = {"Content-Encoding": "gzip"} + else: + self.headers = {"Content-Encoding": ""} + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def read(self): + return self.file.read() + + def close(self): + return self.file.close() + + with tm.ensure_clean() as path: + + def mock_urlopen_read(*args, **kwargs): + return MockReadResponse(path) + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + python_pickler(df, path) + monkeypatch.setattr("urllib.request.urlopen", mock_urlopen_read) + result = pd.read_pickle(mockurl) + tm.assert_frame_equal(df, result) + + +def test_pickle_fsspec_roundtrip(): + pytest.importorskip("fsspec") + with tm.ensure_clean(): + mockurl = "memory://mockfile" + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_pickle(mockurl) + result = pd.read_pickle(mockurl) + tm.assert_frame_equal(df, result) + + +class MyTz(datetime.tzinfo): + def __init__(self) -> None: + pass + + +def test_read_pickle_with_subclass(): + # GH 12163 + expected = Series(dtype=object), MyTz() + result = tm.round_trip_pickle(expected) + + tm.assert_series_equal(result[0], expected[0]) + assert isinstance(result[1], MyTz) + + +def test_pickle_binary_object_compression(compression): + """ + Read/write from binary file-objects w/wo compression. + + GH 26237, GH 29054, and GH 29570 + """ + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # reference for compression + with tm.ensure_clean() as path: + df.to_pickle(path, compression=compression) + reference = Path(path).read_bytes() + + # write + buffer = io.BytesIO() + df.to_pickle(buffer, compression=compression) + buffer.seek(0) + + # gzip and zip save the filename: cannot compare the compressed content + assert buffer.getvalue() == reference or compression in ("gzip", "zip", "tar") + + # read + read_df = pd.read_pickle(buffer, compression=compression) + buffer.seek(0) + tm.assert_frame_equal(df, read_df) + + +def test_pickle_dataframe_with_multilevel_index( + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, +): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + def _test_roundtrip(frame): + unpickled = tm.round_trip_pickle(frame) + tm.assert_frame_equal(frame, unpickled) + + _test_roundtrip(frame) + _test_roundtrip(frame.T) + _test_roundtrip(ymd) + _test_roundtrip(ymd.T) + + +def test_pickle_timeseries_periodindex(): + # GH#2891 + prng = period_range("1/1/2011", "1/1/2012", freq="M") + ts = Series(np.random.default_rng(2).standard_normal(len(prng)), prng) + new_ts = tm.round_trip_pickle(ts) + assert new_ts.index.freqstr == "M" + + +@pytest.mark.parametrize( + "name", [777, 777.0, "name", datetime.datetime(2001, 11, 11), (1, 2)] +) +def test_pickle_preserve_name(name): + unpickled = tm.round_trip_pickle(Series(np.arange(10, dtype=np.float64), name=name)) + assert unpickled.name == name + + +def test_pickle_datetimes(datetime_series): + unp_ts = tm.round_trip_pickle(datetime_series) + tm.assert_series_equal(unp_ts, datetime_series) + + +def test_pickle_strings(string_series): + unp_series = 
tm.round_trip_pickle(string_series) + tm.assert_series_equal(unp_series, string_series) + + +@td.skip_array_manager_invalid_test +def test_pickle_preserves_block_ndim(): + # GH#37631 + ser = Series(list("abc")).astype("category").iloc[[0]] + res = tm.round_trip_pickle(ser) + + assert res._mgr.blocks[0].ndim == 1 + assert res._mgr.blocks[0].shape == (1,) + + # GH#37631 OP issue was about indexing, underlying problem was pickle + tm.assert_series_equal(res[[True]], ser) + + +@pytest.mark.parametrize("protocol", [pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL]) +def test_pickle_big_dataframe_compression(protocol, compression): + # GH#39002 + df = DataFrame(range(100000)) + result = tm.round_trip_pathlib( + partial(df.to_pickle, protocol=protocol, compression=compression), + partial(pd.read_pickle, compression=compression), + ) + tm.assert_frame_equal(df, result) + + +def test_pickle_frame_v124_unpickle_130(datapath): + # GH#42345 DataFrame created in 1.2.x, unpickle in 1.3.x + path = datapath( + Path(__file__).parent, + "data", + "legacy_pickle", + "1.2.4", + "empty_frame_v1_2_4-GH#42345.pkl", + ) + with open(path, "rb") as fd: + df = pickle.load(fd) + + expected = DataFrame(index=[], columns=[]) + tm.assert_frame_equal(df, expected) + + +def test_pickle_pos_args_deprecation(): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_pickle except for the " + r"argument 'path' will be keyword-only." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buffer = io.BytesIO() + df.to_pickle(buffer, "infer") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_s3.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_s3.py new file mode 100644 index 0000000000000000000000000000000000000000..79473895b662da6af68fbe29a60eb05f134a54df --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_s3.py @@ -0,0 +1,43 @@ +from io import BytesIO + +import pytest + +from pandas import read_csv + + +def test_streaming_s3_objects(): + # GH17135 + # botocore gained iteration support in 1.10.47, can now be used in read_* + pytest.importorskip("botocore", minversion="1.10.47") + from botocore.response import StreamingBody + + data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"] + for el in data: + body = StreamingBody(BytesIO(el), content_length=len(el)) + read_csv(body) + + +@pytest.mark.single_cpu +def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so): + # GH 34626 + pytest.importorskip("s3fs") + result = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", + nrows=3, + storage_options=s3so, + ) + assert len(result) == 3 + + +@pytest.mark.single_cpu +def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so): + # Ensure we can read from a public bucket with credentials + # GH 34626 + pytest.importorskip("s3fs") + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv", + nrows=5, + header=None, + storage_options=s3so, + ) + assert len(df) == 5 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_spss.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_spss.py new file mode 100644 index 0000000000000000000000000000000000000000..e118c90d9bc02041719cd1452b5af8e77b12db77 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_spss.py @@ -0,0 +1,164 @@ +import datetime +from pathlib import Path + +import numpy as np +import pytest + +import pandas as pd 
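+# pyreadstat, pulled in below with pytest.importorskip, is an optional +# dependency: when it is missing, this whole module is skipped rather than failed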
+import pandas._testing as tm +from pandas.util.version import Version + +pyreadstat = pytest.importorskip("pyreadstat") + + +# TODO(CoW) - detection of chained assignment in cython +# https://github.com/pandas-dev/pandas/issues/51315 +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +@pytest.mark.parametrize("path_klass", [lambda p: p, Path]) +def test_spss_labelled_num(path_klass, datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav")) + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0]) + expected["VAR00002"] = pd.Categorical(expected["VAR00002"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"VAR00002": 1.0}, index=[0]) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_labelled_num_na(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "labelled-num-na.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"VAR00002": ["This is one", None]}) + expected["VAR00002"] = pd.Categorical(expected["VAR00002"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"VAR00002": [1.0, np.nan]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_labelled_str(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "labelled-str.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame({"gender": ["Male", "Female"]}) + expected["gender"] = pd.Categorical(expected["gender"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"gender": ["M", "F"]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_umlauts(datapath): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "umlauts.sav") + + df = pd.read_spss(fname, convert_categoricals=True) + expected = pd.DataFrame( + {"var1": ["the ä umlaut", "the ü umlaut", "the ä umlaut", "the ö umlaut"]} + ) + expected["var1"] = pd.Categorical(expected["var1"]) + tm.assert_frame_equal(df, expected) + + df = pd.read_spss(fname, convert_categoricals=False) + expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}) + tm.assert_frame_equal(df, expected) + + +def test_spss_usecols(datapath): + # usecols must be list-like + fname = datapath("io", "data", "spss", "labelled-num.sav") + + with pytest.raises(TypeError, match="usecols must be list-like."): + 
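# a plain str is rejected even though it is iterable; a list-like, + # e.g. ["VAR00002"], is the accepted spelling +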
pd.read_spss(fname, usecols="VAR00002") + + +def test_spss_umlauts_dtype_backend(datapath, dtype_backend): + # test file from the Haven project (https://haven.tidyverse.org/) + # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT + fname = datapath("io", "data", "spss", "umlauts.sav") + + df = pd.read_spss(fname, convert_categoricals=False, dtype_backend=dtype_backend) + expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64") + + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + from pandas.arrays import ArrowExtensionArray + + expected = pd.DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + tm.assert_frame_equal(df, expected) + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + pd.read_spss("test", dtype_backend="numpy") + + +@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError") +@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning") +def test_spss_metadata(datapath): + # GH 54264 + fname = datapath("io", "data", "spss", "labelled-num.sav") + + df = pd.read_spss(fname) + metadata = { + "column_names": ["VAR00002"], + "column_labels": [None], + "column_names_to_labels": {"VAR00002": None}, + "file_encoding": "UTF-8", + "number_columns": 1, + "number_rows": 1, + "variable_value_labels": {"VAR00002": {1.0: "This is one"}}, + "value_labels": {"labels0": {1.0: "This is one"}}, + "variable_to_label": {"VAR00002": "labels0"}, + "notes": [], + "original_variable_types": {"VAR00002": "F8.0"}, + "readstat_variable_types": {"VAR00002": "double"}, + "table_name": None, + "missing_ranges": {}, + "missing_user_values": {}, + "variable_storage_width": {"VAR00002": 8}, + "variable_display_width": {"VAR00002": 8}, + "variable_alignment": {"VAR00002": "unknown"}, + "variable_measure": {"VAR00002": "unknown"}, + "file_label": None, + "file_format": "sav/zsav", + } + if Version(pyreadstat.__version__) >= Version("1.2.4"): + metadata.update( + { + "creation_time": datetime.datetime(2015, 2, 6, 14, 33, 36), + "modification_time": datetime.datetime(2015, 2, 6, 14, 33, 36), + } + ) + assert df.attrs == metadata diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_sql.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_sql.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1f965f26aa9f2e400471dfc14641cf65d0bdf8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_sql.py @@ -0,0 +1,4388 @@ +from __future__ import annotations + +import contextlib +from contextlib import closing +import csv +from datetime import ( + date, + datetime, + time, + timedelta, +) +from io import StringIO +from pathlib import Path +import sqlite3 +from typing import TYPE_CHECKING +import uuid + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas.compat import ( + pa_version_under13p0, + pa_version_under14p1, +) +from pandas.compat._optional import import_optional_dependency +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + isna, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) +from pandas.util.version import Version + +from 
pandas.io import sql +from pandas.io.sql import ( + SQLAlchemyEngine, + SQLDatabase, + SQLiteDatabase, + get_engine, + pandasSQL_builder, + read_sql_query, + read_sql_table, +) + +if TYPE_CHECKING: + import sqlalchemy + + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def sql_strings(): + return { + "read_parameters": { + "sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?", + "mysql": "SELECT * FROM iris WHERE `Name`=%s AND `SepalLength`=%s", + "postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s', + }, + "read_named_parameters": { + "sqlite": """ + SELECT * FROM iris WHERE Name=:name AND SepalLength=:length + """, + "mysql": """ + SELECT * FROM iris WHERE + `Name`=%(name)s AND `SepalLength`=%(length)s + """, + "postgresql": """ + SELECT * FROM iris WHERE + "Name"=%(name)s AND "SepalLength"=%(length)s + """, + }, + "read_no_parameters_with_percent": { + "sqlite": "SELECT * FROM iris WHERE Name LIKE '%'", + "mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'", + "postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'", + }, + } + + +def iris_table_metadata(): + import sqlalchemy + from sqlalchemy import ( + Column, + Double, + Float, + MetaData, + String, + Table, + ) + + dtype = Double if Version(sqlalchemy.__version__) >= Version("2.0.0") else Float + metadata = MetaData() + iris = Table( + "iris", + metadata, + Column("SepalLength", dtype), + Column("SepalWidth", dtype), + Column("PetalLength", dtype), + Column("PetalWidth", dtype), + Column("Name", String(200)), + ) + return iris + + +def create_and_load_iris_sqlite3(conn, iris_file: Path): + stmt = """CREATE TABLE iris ( + "SepalLength" REAL, + "SepalWidth" REAL, + "PetalLength" REAL, + "PetalWidth" REAL, + "Name" TEXT + )""" + + cur = conn.cursor() + cur.execute(stmt) + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + next(reader) + stmt = "INSERT INTO iris VALUES(?, ?, ?, ?, ?)" + # ADBC requires explicit types - no implicit str -> float conversion + records = [ + ( + float(row[0]), + float(row[1]), + float(row[2]), + float(row[3]), + row[4], + ) + for row in reader + ] + + cur.executemany(stmt, records) + cur.close() + + conn.commit() + + +def create_and_load_iris_postgresql(conn, iris_file: Path): + stmt = """CREATE TABLE iris ( + "SepalLength" DOUBLE PRECISION, + "SepalWidth" DOUBLE PRECISION, + "PetalLength" DOUBLE PRECISION, + "PetalWidth" DOUBLE PRECISION, + "Name" TEXT + )""" + with conn.cursor() as cur: + cur.execute(stmt) + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + next(reader) + stmt = "INSERT INTO iris VALUES($1, $2, $3, $4, $5)" + # ADBC requires explicit types - no implicit str -> float conversion + records = [ + ( + float(row[0]), + float(row[1]), + float(row[2]), + float(row[3]), + row[4], + ) + for row in reader + ] + + cur.executemany(stmt, records) + + conn.commit() + + +def create_and_load_iris(conn, iris_file: Path): + from sqlalchemy import insert + + iris = iris_table_metadata() + + with iris_file.open(newline=None, encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + header = next(reader) + params = [dict(zip(header, row)) for row in reader] + stmt = insert(iris).values(params) + with conn.begin() as con: + iris.drop(con, checkfirst=True) + iris.create(bind=con) + con.execute(stmt) + + +def create_and_load_iris_view(conn): + stmt = "CREATE VIEW iris_view AS SELECT * FROM 
iris" + if isinstance(conn, sqlite3.Connection): + cur = conn.cursor() + cur.execute(stmt) + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(stmt) + conn.commit() + else: + from sqlalchemy import text + + stmt = text(stmt) + with conn.begin() as con: + con.execute(stmt) + + +def types_table_metadata(dialect: str): + from sqlalchemy import ( + TEXT, + Boolean, + Column, + DateTime, + Float, + Integer, + MetaData, + Table, + ) + + date_type = TEXT if dialect == "sqlite" else DateTime + bool_type = Integer if dialect == "sqlite" else Boolean + metadata = MetaData() + types = Table( + "types", + metadata, + Column("TextCol", TEXT), + Column("DateCol", date_type), + Column("IntDateCol", Integer), + Column("IntDateOnlyCol", Integer), + Column("FloatCol", Float), + Column("IntCol", Integer), + Column("BoolCol", bool_type), + Column("IntColWithNull", Integer), + Column("BoolColWithNull", bool_type), + ) + return types + + +def create_and_load_types_sqlite3(conn, types_data: list[dict]): + stmt = """CREATE TABLE types ( + "TextCol" TEXT, + "DateCol" TEXT, + "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, + "FloatCol" REAL, + "IntCol" INTEGER, + "BoolCol" INTEGER, + "IntColWithNull" INTEGER, + "BoolColWithNull" INTEGER + )""" + + ins_stmt = """ + INSERT INTO types + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) + """ + + if isinstance(conn, sqlite3.Connection): + cur = conn.cursor() + cur.execute(stmt) + cur.executemany(ins_stmt, types_data) + else: + with conn.cursor() as cur: + cur.execute(stmt) + cur.executemany(ins_stmt, types_data) + + conn.commit() + + +def create_and_load_types_postgresql(conn, types_data: list[dict]): + with conn.cursor() as cur: + stmt = """CREATE TABLE types ( + "TextCol" TEXT, + "DateCol" TIMESTAMP, + "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, + "FloatCol" DOUBLE PRECISION, + "IntCol" INTEGER, + "BoolCol" BOOLEAN, + "IntColWithNull" INTEGER, + "BoolColWithNull" BOOLEAN + )""" + cur.execute(stmt) + + stmt = """ + INSERT INTO types + VALUES($1, $2::timestamp, $3, $4, $5, $6, $7, $8, $9) + """ + + cur.executemany(stmt, types_data) + + conn.commit() + + +def create_and_load_types(conn, types_data: list[dict], dialect: str): + from sqlalchemy import insert + from sqlalchemy.engine import Engine + + types = types_table_metadata(dialect) + + stmt = insert(types).values(types_data) + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + types.drop(conn, checkfirst=True) + types.create(bind=conn) + conn.execute(stmt) + else: + with conn.begin(): + types.drop(conn, checkfirst=True) + types.create(bind=conn) + conn.execute(stmt) + + +def create_and_load_postgres_datetz(conn): + from sqlalchemy import ( + Column, + DateTime, + MetaData, + Table, + insert, + ) + from sqlalchemy.engine import Engine + + metadata = MetaData() + datetz = Table("datetz", metadata, Column("DateColWithTz", DateTime(timezone=True))) + datetz_data = [ + { + "DateColWithTz": "2000-01-01 00:00:00-08:00", + }, + { + "DateColWithTz": "2000-06-01 00:00:00-07:00", + }, + ] + stmt = insert(datetz).values(datetz_data) + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + else: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + + # "2000-01-01 00:00:00-08:00" should convert to + # "2000-01-01 
08:00:00" + # "2000-06-01 00:00:00-07:00" should convert to + # "2000-06-01 07:00:00" + # GH 6415 + expected_data = [ + Timestamp("2000-01-01 08:00:00", tz="UTC"), + Timestamp("2000-06-01 07:00:00", tz="UTC"), + ] + return Series(expected_data, name="DateColWithTz") + + +def check_iris_frame(frame: DataFrame): + pytype = frame.dtypes.iloc[0].type + row = frame.iloc[0] + assert issubclass(pytype, np.floating) + tm.assert_series_equal( + row, Series([5.1, 3.5, 1.4, 0.2, "Iris-setosa"], index=frame.columns, name=0) + ) + assert frame.shape in ((150, 5), (8, 5)) + + +def count_rows(conn, table_name: str): + stmt = f"SELECT count(*) AS count_1 FROM {table_name}" + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if isinstance(conn, sqlite3.Connection): + cur = conn.cursor() + return cur.execute(stmt).fetchone()[0] + elif adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(stmt) + return cur.fetchone()[0] + else: + from sqlalchemy import create_engine + from sqlalchemy.engine import Engine + + if isinstance(conn, str): + try: + engine = create_engine(conn) + with engine.connect() as conn: + return conn.exec_driver_sql(stmt).scalar_one() + finally: + engine.dispose() + elif isinstance(conn, Engine): + with conn.connect() as conn: + return conn.exec_driver_sql(stmt).scalar_one() + else: + return conn.exec_driver_sql(stmt).scalar_one() + + +@pytest.fixture +def iris_path(datapath): + iris_path = datapath("io", "data", "csv", "iris.csv") + return Path(iris_path) + + +@pytest.fixture +def types_data(): + return [ + { + "TextCol": "first", + "DateCol": "2000-01-03 00:00:00", + "IntDateCol": 535852800, + "IntDateOnlyCol": 20101010, + "FloatCol": 10.10, + "IntCol": 1, + "BoolCol": False, + "IntColWithNull": 1, + "BoolColWithNull": False, + }, + { + "TextCol": "first", + "DateCol": "2000-01-04 00:00:00", + "IntDateCol": 1356998400, + "IntDateOnlyCol": 20101212, + "FloatCol": 10.10, + "IntCol": 1, + "BoolCol": False, + "IntColWithNull": None, + "BoolColWithNull": None, + }, + ] + + +@pytest.fixture +def types_data_frame(types_data): + dtypes = { + "TextCol": "str", + "DateCol": "str", + "IntDateCol": "int64", + "IntDateOnlyCol": "int64", + "FloatCol": "float", + "IntCol": "int64", + "BoolCol": "int64", + "IntColWithNull": "float", + "BoolColWithNull": "float", + } + df = DataFrame(types_data) + return df[dtypes.keys()].astype(dtypes) + + +@pytest.fixture +def test_frame1(): + columns = ["index", "A", "B", "C", "D"] + data = [ + ( + "2000-01-03 00:00:00", + 0.980268513777, + 3.68573087906, + -0.364216805298, + -1.15973806169, + ), + ( + "2000-01-04 00:00:00", + 1.04791624281, + -0.0412318367011, + -0.16181208307, + 0.212549316967, + ), + ( + "2000-01-05 00:00:00", + 0.498580885705, + 0.731167677815, + -0.537677223318, + 1.34627041952, + ), + ( + "2000-01-06 00:00:00", + 1.12020151869, + 1.56762092543, + 0.00364077397681, + 0.67525259227, + ), + ] + return DataFrame(data, columns=columns) + + +@pytest.fixture +def test_frame3(): + columns = ["index", "A", "B"] + data = [ + ("2000-01-03 00:00:00", 2**31 - 1, -1.987670), + ("2000-01-04 00:00:00", -29, -0.0412318367011), + ("2000-01-05 00:00:00", 20000, 0.731167677815), + ("2000-01-06 00:00:00", -290867, 1.56762092543), + ] + return DataFrame(data, columns=columns) + + +def get_all_views(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='view'") + return [view[0] for view in c.fetchall()] + else: + adbc = 
import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + results = [] + info = conn.adbc_get_objects().read_all().to_pylist() + for catalog in info: + catalog["catalog_name"] + for schema in catalog["catalog_db_schemas"]: + schema["db_schema_name"] + for table in schema["db_schema_tables"]: + if table["table_type"] == "view": + view_name = table["table_name"] + results.append(view_name) + + return results + else: + from sqlalchemy import inspect + + return inspect(conn).get_view_names() + + +def get_all_tables(conn): + if isinstance(conn, sqlite3.Connection): + c = conn.execute("SELECT name FROM sqlite_master WHERE type='table'") + return [table[0] for table in c.fetchall()] + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + + if adbc and isinstance(conn, adbc.Connection): + results = [] + info = conn.adbc_get_objects().read_all().to_pylist() + for catalog in info: + for schema in catalog["catalog_db_schemas"]: + for table in schema["db_schema_tables"]: + if table["table_type"] == "table": + table_name = table["table_name"] + results.append(table_name) + + return results + else: + from sqlalchemy import inspect + + return inspect(conn).get_table_names() + + +def drop_table( + table_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}") + conn.commit() + + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(f'DROP TABLE IF EXISTS "{table_name}"') + else: + with conn.begin() as con: + with sql.SQLDatabase(con) as db: + db.drop_table(table_name) + + +def drop_view( + view_name: str, + conn: sqlite3.Connection | sqlalchemy.engine.Engine | sqlalchemy.engine.Connection, +): + import sqlalchemy + + if isinstance(conn, sqlite3.Connection): + conn.execute(f"DROP VIEW IF EXISTS {sql._get_valid_sqlite_name(view_name)}") + conn.commit() + else: + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(conn, adbc.Connection): + with conn.cursor() as cur: + cur.execute(f'DROP VIEW IF EXISTS "{view_name}"') + else: + quoted_view = conn.engine.dialect.identifier_preparer.quote_identifier( + view_name + ) + stmt = sqlalchemy.text(f"DROP VIEW IF EXISTS {quoted_view}") + with conn.begin() as con: + con.execute(stmt) # type: ignore[union-attr] + + +@pytest.fixture +def mysql_pymysql_engine(): + sqlalchemy = pytest.importorskip("sqlalchemy") + pymysql = pytest.importorskip("pymysql") + engine = sqlalchemy.create_engine( + "mysql+pymysql://root@localhost:3306/pandas", + connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}, + poolclass=sqlalchemy.pool.NullPool, + ) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def mysql_pymysql_engine_iris(mysql_pymysql_engine, iris_path): + create_and_load_iris(mysql_pymysql_engine, iris_path) + create_and_load_iris_view(mysql_pymysql_engine) + yield mysql_pymysql_engine + + +@pytest.fixture +def mysql_pymysql_engine_types(mysql_pymysql_engine, types_data): + create_and_load_types(mysql_pymysql_engine, types_data, "mysql") + yield mysql_pymysql_engine + + +@pytest.fixture +def 
mysql_pymysql_conn(mysql_pymysql_engine): + with mysql_pymysql_engine.connect() as conn: + yield conn + + +@pytest.fixture +def mysql_pymysql_conn_iris(mysql_pymysql_engine_iris): + with mysql_pymysql_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def mysql_pymysql_conn_types(mysql_pymysql_engine_types): + with mysql_pymysql_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_engine(): + sqlalchemy = pytest.importorskip("sqlalchemy") + pytest.importorskip("psycopg2") + engine = sqlalchemy.create_engine( + "postgresql+psycopg2://postgres:postgres@localhost:5432/pandas", + poolclass=sqlalchemy.pool.NullPool, + ) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def postgresql_psycopg2_engine_iris(postgresql_psycopg2_engine, iris_path): + create_and_load_iris(postgresql_psycopg2_engine, iris_path) + create_and_load_iris_view(postgresql_psycopg2_engine) + yield postgresql_psycopg2_engine + + +@pytest.fixture +def postgresql_psycopg2_engine_types(postgresql_psycopg2_engine, types_data): + create_and_load_types(postgresql_psycopg2_engine, types_data, "postgres") + yield postgresql_psycopg2_engine + + +@pytest.fixture +def postgresql_psycopg2_conn(postgresql_psycopg2_engine): + with postgresql_psycopg2_engine.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_adbc_conn(): + pytest.importorskip("adbc_driver_postgresql") + from adbc_driver_postgresql import dbapi + + uri = "postgresql://postgres:postgres@localhost:5432/pandas" + with dbapi.connect(uri) as conn: + yield conn + for view in get_all_views(conn): + drop_view(view, conn) + for tbl in get_all_tables(conn): + drop_table(tbl, conn) + conn.commit() + + +@pytest.fixture +def postgresql_adbc_iris(postgresql_adbc_conn, iris_path): + import adbc_driver_manager as mgr + + conn = postgresql_adbc_conn + + try: + conn.adbc_get_table_schema("iris") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_postgresql(conn, iris_path) + try: + conn.adbc_get_table_schema("iris_view") + except mgr.ProgrammingError: # note arrow-adbc issue 1022 + conn.rollback() + create_and_load_iris_view(conn) + yield conn + + +@pytest.fixture +def postgresql_adbc_types(postgresql_adbc_conn, types_data): + import adbc_driver_manager as mgr + + conn = postgresql_adbc_conn + + try: + conn.adbc_get_table_schema("types") + except mgr.ProgrammingError: + conn.rollback() + new_data = [tuple(entry.values()) for entry in types_data] + + create_and_load_types_postgresql(conn, new_data) + + yield conn + + +@pytest.fixture +def postgresql_psycopg2_conn_iris(postgresql_psycopg2_engine_iris): + with postgresql_psycopg2_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types): + with postgresql_psycopg2_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str(): + pytest.importorskip("sqlalchemy") + with tm.ensure_clean() as name: + yield f"sqlite:///{name}" + + +@pytest.fixture +def sqlite_engine(sqlite_str): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool) + yield engine + for view in get_all_views(engine): + drop_view(view, engine) + for tbl in get_all_tables(engine): + drop_table(tbl, engine) + engine.dispose() + + +@pytest.fixture +def sqlite_conn(sqlite_engine): + with 
sqlite_engine.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str_iris(sqlite_str, iris_path): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_iris(engine, iris_path) + create_and_load_iris_view(engine) + engine.dispose() + return sqlite_str + + +@pytest.fixture +def sqlite_engine_iris(sqlite_engine, iris_path): + create_and_load_iris(sqlite_engine, iris_path) + create_and_load_iris_view(sqlite_engine) + yield sqlite_engine + + +@pytest.fixture +def sqlite_conn_iris(sqlite_engine_iris): + with sqlite_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str_types(sqlite_str, types_data): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_types(engine, types_data, "sqlite") + engine.dispose() + return sqlite_str + + +@pytest.fixture +def sqlite_engine_types(sqlite_engine, types_data): + create_and_load_types(sqlite_engine, types_data, "sqlite") + yield sqlite_engine + + +@pytest.fixture +def sqlite_conn_types(sqlite_engine_types): + with sqlite_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_adbc_conn(): + pytest.importorskip("adbc_driver_sqlite") + from adbc_driver_sqlite import dbapi + + with tm.ensure_clean() as name: + uri = f"file:{name}" + with dbapi.connect(uri) as conn: + yield conn + for view in get_all_views(conn): + drop_view(view, conn) + for tbl in get_all_tables(conn): + drop_table(tbl, conn) + conn.commit() + + +@pytest.fixture +def sqlite_adbc_iris(sqlite_adbc_conn, iris_path): + import adbc_driver_manager as mgr + + conn = sqlite_adbc_conn + try: + conn.adbc_get_table_schema("iris") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_sqlite3(conn, iris_path) + try: + conn.adbc_get_table_schema("iris_view") + except mgr.ProgrammingError: + conn.rollback() + create_and_load_iris_view(conn) + yield conn + + +@pytest.fixture +def sqlite_adbc_types(sqlite_adbc_conn, types_data): + import adbc_driver_manager as mgr + + conn = sqlite_adbc_conn + try: + conn.adbc_get_table_schema("types") + except mgr.ProgrammingError: + conn.rollback() + new_data = [] + for entry in types_data: + entry["BoolCol"] = int(entry["BoolCol"]) + if entry["BoolColWithNull"] is not None: + entry["BoolColWithNull"] = int(entry["BoolColWithNull"]) + new_data.append(tuple(entry.values())) + + create_and_load_types_sqlite3(conn, new_data) + conn.commit() + + yield conn + + +@pytest.fixture +def sqlite_buildin(): + with contextlib.closing(sqlite3.connect(":memory:")) as closing_conn: + with closing_conn as conn: + yield conn + + +@pytest.fixture +def sqlite_buildin_iris(sqlite_buildin, iris_path): + create_and_load_iris_sqlite3(sqlite_buildin, iris_path) + create_and_load_iris_view(sqlite_buildin) + yield sqlite_buildin + + +@pytest.fixture +def sqlite_buildin_types(sqlite_buildin, types_data): + types_data = [tuple(entry.values()) for entry in types_data] + create_and_load_types_sqlite3(sqlite_buildin, types_data) + yield sqlite_buildin + + +mysql_connectable = [ + pytest.param("mysql_pymysql_engine", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn", marks=pytest.mark.db), +] + +mysql_connectable_iris = [ + pytest.param("mysql_pymysql_engine_iris", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_iris", marks=pytest.mark.db), +] + +mysql_connectable_types = [ + pytest.param("mysql_pymysql_engine_types", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_types", 
marks=pytest.mark.db), +] + +postgresql_connectable = [ + pytest.param("postgresql_psycopg2_engine", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn", marks=pytest.mark.db), +] + +postgresql_connectable_iris = [ + pytest.param("postgresql_psycopg2_engine_iris", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_iris", marks=pytest.mark.db), +] + +postgresql_connectable_types = [ + pytest.param("postgresql_psycopg2_engine_types", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_types", marks=pytest.mark.db), +] + +sqlite_connectable = [ + "sqlite_engine", + "sqlite_conn", + "sqlite_str", +] + +sqlite_connectable_iris = [ + "sqlite_engine_iris", + "sqlite_conn_iris", + "sqlite_str_iris", +] + +sqlite_connectable_types = [ + "sqlite_engine_types", + "sqlite_conn_types", + "sqlite_str_types", +] + +sqlalchemy_connectable = mysql_connectable + postgresql_connectable + sqlite_connectable + +sqlalchemy_connectable_iris = ( + mysql_connectable_iris + postgresql_connectable_iris + sqlite_connectable_iris +) + +sqlalchemy_connectable_types = ( + mysql_connectable_types + postgresql_connectable_types + sqlite_connectable_types +) + +adbc_connectable = [ + "sqlite_adbc_conn", + pytest.param("postgresql_adbc_conn", marks=pytest.mark.db), +] + +adbc_connectable_iris = [ + pytest.param("postgresql_adbc_iris", marks=pytest.mark.db), + pytest.param("sqlite_adbc_iris", marks=pytest.mark.db), +] + +adbc_connectable_types = [ + pytest.param("postgresql_adbc_types", marks=pytest.mark.db), + pytest.param("sqlite_adbc_types", marks=pytest.mark.db), +] + + +all_connectable = sqlalchemy_connectable + ["sqlite_buildin"] + adbc_connectable + +all_connectable_iris = ( + sqlalchemy_connectable_iris + ["sqlite_buildin_iris"] + adbc_connectable_iris +) + +all_connectable_types = ( + sqlalchemy_connectable_types + ["sqlite_buildin_types"] + adbc_connectable_types +) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql(conn, test_frame1, request): + # GH 51086 if conn is sqlite_engine + conn = request.getfixturevalue(conn) + test_frame1.to_sql(name="test", con=conn, if_exists="append", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_empty(conn, test_frame1, request): + if conn == "postgresql_adbc_conn": + request.node.add_marker( + pytest.mark.xfail( + reason="postgres ADBC driver cannot insert index with null type", + strict=True, + ) + ) + # GH 51086 if conn is sqlite_engine + conn = request.getfixturevalue(conn) + empty_df = test_frame1.iloc[:0] + empty_df.to_sql(name="test", con=conn, if_exists="append", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes(conn, request): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "int": pd.array([1], dtype="int8[pyarrow]"), + "datetime": pd.array( + [datetime(2023, 1, 1)], dtype="timestamp[ns][pyarrow]" + ), + "date": pd.array([date(2023, 1, 1)], dtype="date32[day][pyarrow]"), + "timedelta": pd.array([timedelta(1)], dtype="duration[ns][pyarrow]"), + "string": pd.array(["a"], dtype="string[pyarrow]"), + } + ) + + if "adbc" in conn: + if conn == "sqlite_adbc_conn": + df = df.drop(columns=["timedelta"]) + if pa_version_under14p1: + exp_warning = DeprecationWarning + msg = "is_sparse is deprecated" + else: + exp_warning = None + msg = "" + else: + exp_warning = UserWarning + msg = "the 'timedelta'" + + conn = request.getfixturevalue(conn) + with tm.assert_produces_warning(exp_warning, 
match=msg, check_stacklevel=False): + df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_dataframe_to_sql_arrow_dtypes_missing(conn, request, nulls_fixture): + # GH 52046 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "datetime": pd.array( + [datetime(2023, 1, 1), nulls_fixture], dtype="timestamp[ns][pyarrow]" + ), + } + ) + conn = request.getfixturevalue(conn) + df.to_sql(name="test_arrow", con=conn, if_exists="replace", index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("method", [None, "multi"]) +def test_to_sql(conn, method, test_frame1, request): + if method == "multi" and "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'method' not implemented for ADBC drivers", strict=True + ) + ) + + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", method=method) + assert pandasSQL.has_table("test_frame") + assert count_rows(conn, "test_frame") == len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("mode, num_row_coef", [("replace", 1), ("append", 2)]) +def test_to_sql_exist(conn, mode, num_row_coef, test_frame1, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + pandasSQL.to_sql(test_frame1, "test_frame", if_exists=mode) + assert pandasSQL.has_table("test_frame") + assert count_rows(conn, "test_frame") == num_row_coef * len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_exist_fail(conn, test_frame1, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + assert pandasSQL.has_table("test_frame") + + msg = "Table 'test_frame' already exists" + with pytest.raises(ValueError, match=msg): + pandasSQL.to_sql(test_frame1, "test_frame", if_exists="fail") + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = read_sql_query("SELECT * FROM iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("SELECT * FROM iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("SELECT * FROM iris where 0=1", conn) + assert iris_frame.shape == (0, 5) + assert "SepalWidth" in iris_frame.columns + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query_chunksize(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + iris_frame = concat(read_sql_query("SELECT * FROM iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("SELECT * FROM iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("SELECT * FROM iris where 0=1", conn, chunksize=7)) + assert iris_frame.shape == (0, 5) + assert "SepalWidth" in iris_frame.columns + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_query_expression_with_parameter(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not 
implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + from sqlalchemy import ( + MetaData, + Table, + create_engine, + select, + ) + + metadata = MetaData() + autoload_con = create_engine(conn) if isinstance(conn, str) else conn + iris = Table("iris", metadata, autoload_with=autoload_con) + iris_frame = read_sql_query( + select(iris), conn, params={"name": "Iris-setosa", "length": 5.1} + ) + check_iris_frame(iris_frame) + if isinstance(conn, str): + autoload_con.dispose() + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_iris_query_string_with_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'chunksize' not implemented for ADBC drivers", + strict=True, + ) + ) + + for db, query in sql_strings["read_parameters"].items(): + if db in conn: + break + else: + raise KeyError(f"No part of {conn} found in sql_strings['read_parameters']") + conn = request.getfixturevalue(conn) + iris_frame = read_sql_query(query, conn, params=("Iris-setosa", 5.1)) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_table(conn, request): + # GH 51015 if conn = sqlite_iris_str + conn = request.getfixturevalue(conn) + iris_frame = read_sql_table("iris", conn) + check_iris_frame(iris_frame) + iris_frame = pd.read_sql("iris", conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_iris_table_chunksize(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + iris_frame = concat(read_sql_table("iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + iris_frame = concat(pd.read_sql("iris", conn, chunksize=7)) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_to_sql_callable(conn, test_frame1, request): + conn = request.getfixturevalue(conn) + + check = [] # used to double check function below is really being used + + def sample(pd_table, conn, keys, data_iter): + check.append(1) + data = [dict(zip(keys, row)) for row in data_iter] + conn.execute(pd_table.table.insert(), data) + + with pandasSQL_builder(conn, need_transaction=True) as pandasSQL: + pandasSQL.to_sql(test_frame1, "test_frame", method=sample) + assert pandasSQL.has_table("test_frame") + assert check == [1] + assert count_rows(conn, "test_frame") == len(test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_buildin_types": + request.applymarker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.FloatCol.dtype.type, np.floating) + assert issubclass(df.IntCol.dtype.type, np.integer) + + # MySQL/sqlite has no real BOOL type + if "postgresql" in conn_name: + assert issubclass(df.BoolCol.dtype.type, np.bool_) + else: + assert issubclass(df.BoolCol.dtype.type, np.integer) + + # Int column with NA values stays as float + assert issubclass(df.IntColWithNull.dtype.type, np.floating) + + # Bool column with NA = int column with NA values => becomes float + if "postgresql" in conn_name: + assert issubclass(df.BoolColWithNull.dtype.type, object) + else: + assert 
issubclass(df.BoolColWithNull.dtype.type, np.floating) + + +@pytest.mark.parametrize("conn", mysql_connectable) +def test_read_procedure(conn, request): + conn = request.getfixturevalue(conn) + + # GH 7324 + # Although it is more an api test, it is added to the + # mysql tests as sqlite does not have stored procedures + from sqlalchemy import text + from sqlalchemy.engine import Engine + + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df.to_sql(name="test_frame", con=conn, index=False) + + proc = """DROP PROCEDURE IF EXISTS get_testdb; + + CREATE PROCEDURE get_testdb () + + BEGIN + SELECT * FROM test_frame; + END""" + proc = text(proc) + if isinstance(conn, Engine): + with conn.connect() as engine_conn: + with engine_conn.begin(): + engine_conn.execute(proc) + else: + with conn.begin(): + conn.execute(proc) + + res1 = sql.read_sql_query("CALL get_testdb();", conn) + tm.assert_frame_equal(df, res1) + + # test delegation to read_sql_query + res2 = sql.read_sql("CALL get_testdb();", conn) + tm.assert_frame_equal(df, res2) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +@pytest.mark.parametrize("expected_count", [2, "Success!"]) +def test_copy_from_callable_insertion_method(conn, expected_count, request): + # GH 8953 + # Example in io.rst found under _io.sql.method + # not available in sqlite, mysql + def psql_insert_copy(table, conn, keys, data_iter): + # gets a DBAPI connection that can provide a cursor + dbapi_conn = conn.connection + with dbapi_conn.cursor() as cur: + s_buf = StringIO() + writer = csv.writer(s_buf) + writer.writerows(data_iter) + s_buf.seek(0) + + columns = ", ".join([f'"{k}"' for k in keys]) + if table.schema: + table_name = f"{table.schema}.{table.name}" + else: + table_name = table.name + + sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV" + cur.copy_expert(sql=sql_query, file=s_buf) + return expected_count + + conn = request.getfixturevalue(conn) + expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) + result_count = expected.to_sql( + name="test_frame", con=conn, index=False, method=psql_insert_copy + ) + # GH 46891 + if expected_count is None: + assert result_count is None + else: + assert result_count == expected_count + result = sql.read_sql_table("test_frame", conn) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_insertion_method_on_conflict_do_nothing(conn, request): + # GH 15988: Example in to_sql docstring + conn = request.getfixturevalue(conn) + + from sqlalchemy.dialects.postgresql import insert + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + def insert_on_conflict(table, conn, keys, data_iter): + data = [dict(zip(keys, row)) for row in data_iter] + stmt = ( + insert(table.table) + .values(data) + .on_conflict_do_nothing(index_elements=["a"]) + ) + result = conn.execute(stmt) + return result.rowcount + + create_sql = text( + """ + CREATE TABLE test_insert_conflict ( + a integer PRIMARY KEY, + b numeric, + c text + ); + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(create_sql) + else: + with conn.begin(): + conn.execute(create_sql) + + expected = DataFrame([[1, 2.1, "a"]], columns=list("abc")) + expected.to_sql( + name="test_insert_conflict", con=conn, if_exists="append", index=False + ) + + df_insert = DataFrame([[1, 3.2, "b"]], columns=list("abc")) + inserted = df_insert.to_sql( + name="test_insert_conflict", + con=conn, + index=False, + 
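# insert_on_conflict above receives (table, conn, keys, data_iter); its + # rowcount return value becomes this to_sql call's return value, which + # is asserted to be 0 below since the duplicate key is skipped +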
if_exists="append", + method=insert_on_conflict, + ) + result = sql.read_sql_table("test_insert_conflict", conn) + tm.assert_frame_equal(result, expected) + assert inserted == 0 + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_insert_conflict") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_on_public_schema(conn, request): + if "sqlite" in conn or "mysql" in conn: + request.applymarker( + pytest.mark.xfail( + reason="test for public schema only specific to postgresql" + ) + ) + + conn = request.getfixturevalue(conn) + + test_data = DataFrame([[1, 2.1, "a"], [2, 3.1, "b"]], columns=list("abc")) + test_data.to_sql( + name="test_public_schema", + con=conn, + if_exists="append", + index=False, + schema="public", + ) + + df_out = sql.read_sql_table("test_public_schema", conn, schema="public") + tm.assert_frame_equal(test_data, df_out) + + +@pytest.mark.parametrize("conn", mysql_connectable) +def test_insertion_method_on_conflict_update(conn, request): + # GH 14553: Example in to_sql docstring + conn = request.getfixturevalue(conn) + + from sqlalchemy.dialects.mysql import insert + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + def insert_on_conflict(table, conn, keys, data_iter): + data = [dict(zip(keys, row)) for row in data_iter] + stmt = insert(table.table).values(data) + stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c) + result = conn.execute(stmt) + return result.rowcount + + create_sql = text( + """ + CREATE TABLE test_insert_conflict ( + a INT PRIMARY KEY, + b FLOAT, + c VARCHAR(10) + ); + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(create_sql) + else: + with conn.begin(): + conn.execute(create_sql) + + df = DataFrame([[1, 2.1, "a"]], columns=list("abc")) + df.to_sql(name="test_insert_conflict", con=conn, if_exists="append", index=False) + + expected = DataFrame([[1, 3.2, "b"]], columns=list("abc")) + inserted = expected.to_sql( + name="test_insert_conflict", + con=conn, + index=False, + if_exists="append", + method=insert_on_conflict, + ) + result = sql.read_sql_table("test_insert_conflict", conn) + tm.assert_frame_equal(result, expected) + assert inserted == 2 + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_insert_conflict") + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_read_view_postgres(conn, request): + # GH 52969 + conn = request.getfixturevalue(conn) + + from sqlalchemy.engine import Engine + from sqlalchemy.sql import text + + table_name = f"group_{uuid.uuid4().hex}" + view_name = f"group_view_{uuid.uuid4().hex}" + + sql_stmt = text( + f""" + CREATE TABLE {table_name} ( + group_id INTEGER, + name TEXT + ); + INSERT INTO {table_name} VALUES + (1, 'name'); + CREATE VIEW {view_name} + AS + SELECT * FROM {table_name}; + """ + ) + if isinstance(conn, Engine): + with conn.connect() as con: + with con.begin(): + con.execute(sql_stmt) + else: + with conn.begin(): + conn.execute(sql_stmt) + result = read_sql_table(view_name, conn) + expected = DataFrame({"group_id": [1], "name": "name"}) + tm.assert_frame_equal(result, expected) + + +def test_read_view_sqlite(sqlite_buildin): + # GH 52969 + create_table = """ +CREATE TABLE groups ( + group_id INTEGER, + name TEXT +); +""" + insert_into = """ +INSERT INTO groups VALUES + (1, 'name'); +""" + create_view = """ +CREATE VIEW group_view +AS +SELECT * FROM groups; 
+""" + sqlite_buildin.execute(create_table) + sqlite_buildin.execute(insert_into) + sqlite_buildin.execute(create_view) + result = pd.read_sql("SELECT * FROM group_view", sqlite_buildin) + expected = DataFrame({"group_id": [1], "name": "name"}) + tm.assert_frame_equal(result, expected) + + +def test_execute_typeerror(sqlite_engine_iris): + with pytest.raises(TypeError, match="pandas.io.sql.execute requires a connection"): + with tm.assert_produces_warning( + FutureWarning, + match="`pandas.io.sql.execute` is deprecated and " + "will be removed in the future version.", + ): + sql.execute("select * from iris", sqlite_engine_iris) + + +def test_execute_deprecated(sqlite_conn_iris): + # GH50185 + with tm.assert_produces_warning( + FutureWarning, + match="`pandas.io.sql.execute` is deprecated and " + "will be removed in the future version.", + ): + sql.execute("select * from iris", sqlite_conn_iris) + + +def flavor(conn_name): + if "postgresql" in conn_name: + return "postgresql" + elif "sqlite" in conn_name: + return "sqlite" + elif "mysql" in conn_name: + return "mysql" + + raise ValueError(f"unsupported connection: {conn_name}") + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'params' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_parameters"][flavor(conn_name)] + params = ("Iris-setosa", 5.1) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_named_parameter(conn, request, sql_strings): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'params' not implemented for ADBC drivers", + strict=True, + ) + ) + + conn_name = conn + conn = request.getfixturevalue(conn) + query = sql_strings["read_named_parameters"][flavor(conn_name)] + params = {"name": "Iris-setosa", "length": 5.1} + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_iris_no_parameter_with_percent(conn, request, sql_strings): + if "mysql" in conn or ("postgresql" in conn and "adbc" not in conn): + request.applymarker(pytest.mark.xfail(reason="broken test")) + + conn_name = conn + conn = request.getfixturevalue(conn) + + query = sql_strings["read_no_parameters_with_percent"][flavor(conn_name)] + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=None) + check_iris_frame(iris_frame) + + +# ----------------------------------------------------------------------------- +# -- Testing the public API + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_view(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_query("SELECT * FROM iris_view", conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_read_sql_with_chunksize_no_result(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = 
request.getfixturevalue(conn) + query = 'SELECT * FROM iris_view WHERE "SepalLength" < 0.0' + with_batch = sql.read_sql_query(query, conn, chunksize=5) + without_batch = sql.read_sql_query(query, conn) + tm.assert_frame_equal(concat(with_batch), without_batch) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame1", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame1") + + sql.to_sql(test_frame1, "test_frame1", conn) + assert sql.has_table("test_frame1", conn) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_fail(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame2", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame2") + + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") + assert sql.has_table("test_frame2", conn) + + msg = "Table 'test_frame2' already exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql(test_frame1, "test_frame2", conn, if_exists="fail") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_replace(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame3", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame3") + + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="fail") + # Add to table again + sql.to_sql(test_frame1, "test_frame3", conn, if_exists="replace") + assert sql.has_table("test_frame3", conn) + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame3") + + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_append(conn, request, test_frame1): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame4", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame4") + + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="fail") == 4 + + # Add to table again + assert sql.to_sql(test_frame1, "test_frame4", conn, if_exists="append") == 4 + assert sql.has_table("test_frame4", conn) + + num_entries = 2 * len(test_frame1) + num_rows = count_rows(conn, "test_frame4") + + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_type_mapping(conn, request, test_frame3): + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame5", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame5") + + sql.to_sql(test_frame3, "test_frame5", conn, index=False) + result = sql.read_sql("SELECT * FROM test_frame5", conn) + + tm.assert_frame_equal(test_frame3, result) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_series(conn, request): + conn = request.getfixturevalue(conn) + if sql.has_table("test_series", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_series") + + s = Series(np.arange(5, dtype="int64"), name="series") + sql.to_sql(s, "test_series", conn, index=False) + s2 = sql.read_sql_query("SELECT * FROM test_series", conn) + tm.assert_frame_equal(s.to_frame(), s2) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip(conn, request, test_frame1): + conn_name = conn + 
conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql(test_frame1, "test_frame_roundtrip", con=conn) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + + # HACK! + if "adbc" in conn_name: + result = result.rename(columns={"__index_level_0__": "level_0"}) + result.index = test_frame1.index + result.set_index("level_0", inplace=True) + result.index.astype(int) + result.index.name = None + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_roundtrip_chunksize(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_frame_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_frame_roundtrip") + + sql.to_sql( + test_frame1, + "test_frame_roundtrip", + con=conn, + index=False, + chunksize=2, + ) + result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=conn) + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_api_execute_sql(conn, request): + # drop_sql = "DROP TABLE IF EXISTS test" # should already be done + conn = request.getfixturevalue(conn) + with sql.pandasSQL_builder(conn) as pandas_sql: + iris_results = pandas_sql.execute("SELECT * FROM iris") + row = iris_results.fetchone() + iris_results.close() + assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"] + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_api_date_parsing(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + # Test date parsing in read_sql + # No Parsing + df = sql.read_sql_query("SELECT * FROM types", conn) + if not ("mysql" in conn_name or "postgres" in conn_name): + assert not issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + Timestamp(2000, 1, 3, 0, 0, 0), + Timestamp(2000, 1, 4, 0, 0, 0), + ] + + df = sql.read_sql_query("SELECT * FROM types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", conn, parse_dates={"IntDateCol": "s"} + ) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + Timestamp(1986, 12, 25, 0, 0, 0), + Timestamp(2013, 1, 1, 0, 0, 0), + ] + + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + parse_dates={"IntDateOnlyCol": "%Y%m%d"}, + ) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + Timestamp("2010-10-10"), + Timestamp("2010-12-12"), + ] + + +@pytest.mark.parametrize("conn", all_connectable_types) +@pytest.mark.parametrize("error", ["ignore", "raise", 
"coerce"]) +@pytest.mark.parametrize( + "read_sql, text, mode", + [ + (sql.read_sql, "SELECT * FROM types", ("sqlalchemy", "fallback")), + (sql.read_sql, "types", ("sqlalchemy")), + ( + sql.read_sql_query, + "SELECT * FROM types", + ("sqlalchemy", "fallback"), + ), + (sql.read_sql_table, "types", ("sqlalchemy")), + ], +) +def test_api_custom_dateparsing_error( + conn, request, read_sql, text, mode, error, types_data_frame +): + conn_name = conn + conn = request.getfixturevalue(conn) + if text == "types" and conn_name == "sqlite_buildin_types": + request.applymarker( + pytest.mark.xfail(reason="failing combination of arguments") + ) + + expected = types_data_frame.astype({"DateCol": "datetime64[ns]"}) + + result = read_sql( + text, + con=conn, + parse_dates={ + "DateCol": {"errors": error}, + }, + ) + if "postgres" in conn_name: + # TODO: clean up types_data_frame fixture + result["BoolCol"] = result["BoolCol"].astype(int) + result["BoolColWithNull"] = result["BoolColWithNull"].astype(float) + + if conn_name == "postgresql_adbc_types": + expected = expected.astype( + { + "IntDateCol": "int32", + "IntDateOnlyCol": "int32", + "IntCol": "int32", + } + ) + + if not pa_version_under13p0: + # TODO: is this astype safe? + expected["DateCol"] = expected["DateCol"].astype("datetime64[us]") + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable_types) +def test_api_date_and_index(conn, request): + # Test case where same column appears in parse_date and index_col + conn = request.getfixturevalue(conn) + df = sql.read_sql_query( + "SELECT * FROM types", + conn, + index_col="DateCol", + parse_dates=["DateCol", "IntDateCol"], + ) + + assert issubclass(df.index.dtype.type, np.datetime64) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_timedelta(conn, request): + # see #6921 + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_timedelta", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_timedelta") + + df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame() + + if conn_name == "sqlite_adbc_conn": + request.node.add_marker( + pytest.mark.xfail( + reason="sqlite ADBC driver doesn't implement timedelta", + ) + ) + + if "adbc" in conn_name: + if pa_version_under14p1: + exp_warning = DeprecationWarning + else: + exp_warning = None + else: + exp_warning = UserWarning + + with tm.assert_produces_warning(exp_warning, check_stacklevel=False): + result_count = df.to_sql(name="test_timedelta", con=conn) + assert result_count == 2 + result = sql.read_sql_query("SELECT * FROM test_timedelta", conn) + + if conn_name == "postgresql_adbc_conn": + # TODO: Postgres stores an INTERVAL, which ADBC reads as a Month-Day-Nano + # Interval; the default pandas type mapper maps this to a DateOffset + # but maybe we should try and restore the timedelta here? 
+ expected = Series( + [ + pd.DateOffset(months=0, days=0, microseconds=1000000, nanoseconds=0), + pd.DateOffset(months=0, days=0, microseconds=3000000, nanoseconds=0), + ], + name="foo", + ) + else: + expected = df["foo"].astype("int64") + tm.assert_series_equal(result["foo"], expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_complex_raises(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame({"a": [1 + 1j, 2j]}) + + if "adbc" in conn_name: + msg = "datatypes not supported" + else: + msg = "Complex datatypes not supported" + with pytest.raises(ValueError, match=msg): + assert df.to_sql("test_complex", con=conn) is None + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize( + "index_name,index_label,expected", + [ + # no index name, defaults to 'index' + (None, None, "index"), + # specifying index_label + (None, "other_label", "other_label"), + # using the index name + ("index_name", None, "index_name"), + # has index name, but specifying index_label + ("index_name", "other_label", "other_label"), + # index name is integer + (0, None, "0"), + # index name is None but index label is integer + (None, 0, "0"), + ], +) +def test_api_to_sql_index_label(conn, request, index_name, index_label, expected): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="index_label argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_index_label", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_index_label") + + temp_frame = DataFrame({"col1": range(4)}) + temp_frame.index.name = index_name + query = "SELECT * FROM test_index_label" + sql.to_sql(temp_frame, "test_index_label", conn, index_label=index_label) + frame = sql.read_sql_query(query, conn) + assert frame.columns[0] == expected + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_to_sql_index_label_multiindex(conn, request): + conn_name = conn + if "mysql" in conn_name: + request.applymarker( + pytest.mark.xfail( + reason="MySQL can fail using TEXT without length as key", strict=False + ) + ) + elif "adbc" in conn_name: + request.node.add_marker( + pytest.mark.xfail(reason="index_label argument NotImplemented with ADBC") + ) + + conn = request.getfixturevalue(conn) + if sql.has_table("test_index_label", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_index_label") + + expected_row_count = 4 + temp_frame = DataFrame( + {"col1": range(4)}, + index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]), + ) + + # no index name, defaults to 'level_0' and 'level_1' + result = sql.to_sql(temp_frame, "test_index_label", conn) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[0] == "level_0" + assert frame.columns[1] == "level_1" + + # specifying index_label + result = sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label=["A", "B"], + ) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[:2].tolist() == ["A", "B"] + + # using the index name + temp_frame.index.names = ["A", "B"] + result = sql.to_sql(temp_frame, "test_index_label", conn, if_exists="replace") + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert 
frame.columns[:2].tolist() == ["A", "B"] + + # has index name, but specifying index_label + result = sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label=["C", "D"], + ) + assert result == expected_row_count + frame = sql.read_sql_query("SELECT * FROM test_index_label", conn) + assert frame.columns[:2].tolist() == ["C", "D"] + + msg = "Length of 'index_label' should match number of levels, which is 2" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + temp_frame, + "test_index_label", + conn, + if_exists="replace", + index_label="C", + ) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_multiindex_roundtrip(conn, request): + conn = request.getfixturevalue(conn) + if sql.has_table("test_multiindex_roundtrip", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_multiindex_roundtrip") + + df = DataFrame.from_records( + [(1, 2.1, "line1"), (2, 1.5, "line2")], + columns=["A", "B", "C"], + index=["A", "B"], + ) + + df.to_sql(name="test_multiindex_roundtrip", con=conn) + result = sql.read_sql_query( + "SELECT * FROM test_multiindex_roundtrip", conn, index_col=["A", "B"] + ) + tm.assert_frame_equal(df, result, check_index_type=True) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize( + "dtype", + [ + None, + int, + float, + {"A": int, "B": float}, + ], +) +def test_api_dtype_argument(conn, request, dtype): + # GH10285 Add dtype argument to read_sql_query + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_dtype_argument", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_dtype_argument") + + df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"]) + assert df.to_sql(name="test_dtype_argument", con=conn) == 2 + + expected = df.astype(dtype) + + if "postgres" in conn_name: + query = 'SELECT "A", "B" FROM test_dtype_argument' + else: + query = "SELECT A, B FROM test_dtype_argument" + result = sql.read_sql_query(query, con=conn, dtype=dtype) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_integer_col_names(conn, request): + conn = request.getfixturevalue(conn) + df = DataFrame([[1, 2], [3, 4]], columns=[0, 1]) + sql.to_sql(df, "test_frame_integer_col_names", conn, if_exists="replace") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + create_sql = sql.get_schema(test_frame1, "test", con=conn) + assert "CREATE" in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_with_schema(conn, request, test_frame1): + # GH28486 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn = request.getfixturevalue(conn) + create_sql = sql.get_schema(test_frame1, "test", con=conn, schema="pypi") + assert "CREATE TABLE pypi." 
in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_dtypes(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]}) + + if conn_name == "sqlite_buildin": + dtype = "INTEGER" + else: + from sqlalchemy import Integer + + dtype = Integer + create_sql = sql.get_schema(float_frame, "test", con=conn, dtype={"b": dtype}) + assert "CREATE" in create_sql + assert "INTEGER" in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_get_schema_keys(conn, request, test_frame1): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="'get_schema' not implemented for ADBC drivers", + strict=True, + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]}) + create_sql = sql.get_schema(frame, "test", con=conn, keys="Col1") + + if "mysql" in conn_name: + constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`Col1`)" + else: + constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")' + assert constraint_sentence in create_sql + + # multiple columns as key (GH10385) + create_sql = sql.get_schema(test_frame1, "test", con=conn, keys=["A", "B"]) + if "mysql" in conn_name: + constraint_sentence = "CONSTRAINT test_pk PRIMARY KEY (`A`, `B`)" + else: + constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")' + assert constraint_sentence in create_sql + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_chunksize_read(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("test_chunksize", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_chunksize") + + df = DataFrame( + np.random.default_rng(2).standard_normal((22, 5)), columns=list("abcde") + ) + df.to_sql(name="test_chunksize", con=conn, index=False) + + # reading the query all at once + res1 = sql.read_sql_query("select * from test_chunksize", conn) + + # reading the query in chunks with read_sql_query + res2 = DataFrame() + i = 0 + sizes = [5, 5, 5, 5, 2] + + for chunk in sql.read_sql_query("select * from test_chunksize", conn, chunksize=5): + res2 = concat([res2, chunk], ignore_index=True) + assert len(chunk) == sizes[i] + i += 1 + + tm.assert_frame_equal(res1, res2) + + # reading the table in chunks with read_sql_table + if conn_name == "sqlite_buildin": + with pytest.raises(NotImplementedError, match=""): + sql.read_sql_table("test_chunksize", conn, chunksize=5) + else: + res3 = DataFrame() + i = 0 + sizes = [5, 5, 5, 5, 2] + + for chunk in sql.read_sql_table("test_chunksize", conn, chunksize=5): + res3 = concat([res3, chunk], ignore_index=True) + assert len(chunk) == sizes[i] + i += 1 + + tm.assert_frame_equal(res1, res3) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_categorical(conn, request): + if conn == "postgresql_adbc_conn": + adbc = import_optional_dependency("adbc_driver_postgresql", errors="ignore") + if adbc is not None and Version(adbc.__version__) < Version("0.9.0"): + request.node.add_marker( + pytest.mark.xfail( + reason="categorical dtype not implemented for ADBC postgres driver", + strict=True,
+ ) + ) + # GH8624 + # test that categorical gets written correctly as dense column + conn = request.getfixturevalue(conn) + if sql.has_table("test_categorical", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_categorical") + + df = DataFrame( + { + "person_id": [1, 2, 3], + "person_name": ["John P. Doe", "Jane Dove", "John P. Doe"], + } + ) + df2 = df.copy() + df2["person_name"] = df2["person_name"].astype("category") + + df2.to_sql(name="test_categorical", con=conn, index=False) + res = sql.read_sql_query("SELECT * FROM test_categorical", conn) + + tm.assert_frame_equal(res, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_unicode_column_name(conn, request): + # GH 11431 + conn = request.getfixturevalue(conn) + if sql.has_table("test_unicode", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_unicode") + + df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"]) + df.to_sql(name="test_unicode", con=conn, index=False) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_escaped_table_name(conn, request): + # GH 13206 + conn_name = conn + conn = request.getfixturevalue(conn) + if sql.has_table("d1187b08-4943-4c8d-a7f6", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("d1187b08-4943-4c8d-a7f6") + + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + df.to_sql(name="d1187b08-4943-4c8d-a7f6", con=conn, index=False) + + if "postgres" in conn_name: + query = 'SELECT * FROM "d1187b08-4943-4c8d-a7f6"' + else: + query = "SELECT * FROM `d1187b08-4943-4c8d-a7f6`" + res = sql.read_sql_query(query, conn) + + tm.assert_frame_equal(res, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_api_read_sql_duplicate_columns(conn, request): + # GH#53117 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="pyarrow->pandas throws ValueError", strict=True) + ) + conn = request.getfixturevalue(conn) + if sql.has_table("test_table", conn): + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("test_table") + + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": 1}) + df.to_sql(name="test_table", con=conn, index=False) + + result = pd.read_sql("SELECT a, b, a + 1 as a, c FROM test_table", conn) + expected = DataFrame( + [[1, 0.1, 2, 1], [2, 0.2, 3, 1], [3, 0.3, 4, 1]], + columns=["a", "b", "a", "c"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_columns(conn, request, test_frame1): + # test columns argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) + + cols = ["A", "B"] + + result = sql.read_sql_table("test_frame", conn, columns=cols) + assert result.columns.tolist() == cols + + +@pytest.mark.parametrize("conn", all_connectable) +def test_read_table_index_col(conn, request, test_frame1): + # test index_col argument in read_table + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + sql.to_sql(test_frame1, "test_frame", conn) + + result = sql.read_sql_table("test_frame", conn, index_col="index") + assert result.index.names == ["index"] + + result = sql.read_sql_table("test_frame",
conn, index_col=["A", "B"]) + assert result.index.names == ["A", "B"] + + result = sql.read_sql_table( + "test_frame", conn, index_col=["A", "B"], columns=["C", "D"] + ) + assert result.index.names == ["A", "B"] + assert result.columns.tolist() == ["C", "D"] + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_read_sql_delegate(conn, request): + if conn == "sqlite_buildin_iris": + request.applymarker( + pytest.mark.xfail( + reason="sqlite_buildin connection does not implement read_sql_table" + ) + ) + + conn = request.getfixturevalue(conn) + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + iris_frame1 = sql.read_sql_table("iris", conn) + iris_frame2 = sql.read_sql("iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + +def test_not_reflect_all_tables(sqlite_conn): + conn = sqlite_conn + from sqlalchemy import text + from sqlalchemy.engine import Engine + + # create an invalid table and a valid table + query_list = [ + text("CREATE TABLE invalid (x INTEGER, y UNKNOWN);"), + text("CREATE TABLE other_table (x INTEGER, y INTEGER);"), + ] + + for query in query_list: + if isinstance(conn, Engine): + with conn.connect() as newcon: + with newcon.begin(): + newcon.execute(query) + else: + with conn.begin(): + conn.execute(query) + + with tm.assert_produces_warning(None): + sql.read_sql_table("other_table", conn) + sql.read_sql_query("SELECT * FROM other_table", conn) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_warning_case_insensitive_table_name(conn, request, test_frame1): + conn_name = conn + if conn_name == "sqlite_buildin" or "adbc" in conn_name: + request.applymarker(pytest.mark.xfail(reason="Does not raise warning")) + + conn = request.getfixturevalue(conn) + # see gh-7815 + with tm.assert_produces_warning( + UserWarning, + match=( + r"The provided table name 'TABLE1' is not found exactly as such in " + r"the database after writing the table, possibly due to case " + r"sensitivity issues. Consider using lower case table names." + ), + ): + with sql.SQLDatabase(conn) as db: + db.check_case_sensitive("TABLE1", "") + + # Test that the warning is certainly NOT triggered in a normal case.
+ with tm.assert_produces_warning(None): + test_frame1.to_sql(name="CaseSensitive", con=conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_sqlalchemy_type_mapping(conn, request): + conn = request.getfixturevalue(conn) + from sqlalchemy import TIMESTAMP + + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + df = DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + with sql.SQLDatabase(conn) as db: + table = sql.SQLTable("test_type", db, frame=df) + # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones + assert isinstance(table.table.c["time"].type, TIMESTAMP) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "integer, expected", + [ + ("int8", "SMALLINT"), + ("Int8", "SMALLINT"), + ("uint8", "SMALLINT"), + ("UInt8", "SMALLINT"), + ("int16", "SMALLINT"), + ("Int16", "SMALLINT"), + ("uint16", "INTEGER"), + ("UInt16", "INTEGER"), + ("int32", "INTEGER"), + ("Int32", "INTEGER"), + ("uint32", "BIGINT"), + ("UInt32", "BIGINT"), + ("int64", "BIGINT"), + ("Int64", "BIGINT"), + (int, "BIGINT" if np.dtype(int).name == "int64" else "INTEGER"), + ], +) +def test_sqlalchemy_integer_mapping(conn, request, integer, expected): + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + conn = request.getfixturevalue(conn) + df = DataFrame([0, 1], columns=["a"], dtype=integer) + with sql.SQLDatabase(conn) as db: + table = sql.SQLTable("test_type", db, frame=df) + + result = str(table.table.c.a.type) + assert result == expected + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize("integer", ["uint64", "UInt64"]) +def test_sqlalchemy_integer_overload_mapping(conn, request, integer): + conn = request.getfixturevalue(conn) + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + df = DataFrame([0, 1], columns=["a"], dtype=integer) + with sql.SQLDatabase(conn) as db: + with pytest.raises( + ValueError, match="Unsigned 64 bit integer datatype is not supported" + ): + sql.SQLTable("test_type", db, frame=df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_database_uri_string(conn, request, test_frame1): + pytest.importorskip("sqlalchemy") + conn = request.getfixturevalue(conn) + # Test read_sql and .to_sql method with a database URI (GH10654) + # db_uri = 'sqlite:///:memory:' # raises + # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near + # "iris": syntax error [SQL: 'iris'] + with tm.ensure_clean() as name: + db_uri = "sqlite:///" + name + table = "iris" + test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False) + test_frame2 = sql.read_sql(table, db_uri) + test_frame3 = sql.read_sql_table(table, db_uri) + query = "SELECT * FROM iris" + test_frame4 = sql.read_sql_query(query, db_uri) + tm.assert_frame_equal(test_frame1, test_frame2) + tm.assert_frame_equal(test_frame1, test_frame3) + tm.assert_frame_equal(test_frame1, test_frame4) + + +@td.skip_if_installed("pg8000") +@pytest.mark.parametrize("conn", all_connectable) +def test_pg8000_sqlalchemy_passthrough_error(conn, request): + pytest.importorskip("sqlalchemy") + conn = request.getfixturevalue(conn) + # using driver that will not be installed on CI to trigger error + # in sqlalchemy.create_engine -> test passing of this error to user + db_uri = "postgresql+pg8000://user:pass@host/dbname" + with pytest.raises(ImportError, match="pg8000"): + sql.read_sql("select * from table", db_uri) + + 
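+# Editorial sketch, not part of the upstream suite: the text()/params tests below use +# SQLAlchemy named parameters, while the built-in sqlite3 fallback expresses the same +# idea with DBAPI qmark placeholders and a params tuple. Only the sqlite_buildin_iris +# fixture is reused from this module; the test name and parameter values are illustrative. +def test_read_sql_query_qmark_params_sketch(sqlite_buildin_iris): + query = 'SELECT * FROM iris WHERE "Name" = ? AND "SepalLength" > ?' + iris_frame = sql.read_sql_query(query, sqlite_buildin_iris, params=("Iris-setosa", 5.0)) + # every returned row satisfies the bound predicate + assert (iris_frame["Name"] == "Iris-setosa").all() + +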
+@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_query_by_text_obj(conn, request): + # WIP : GH10846 + conn_name = conn + conn = request.getfixturevalue(conn) + from sqlalchemy import text + + if "postgres" in conn_name: + name_text = text('select * from iris where "Name"=:name') + else: + name_text = text("select * from iris where name=:name") + iris_df = sql.read_sql(name_text, conn, params={"name": "Iris-versicolor"}) + all_names = set(iris_df["Name"]) + assert all_names == {"Iris-versicolor"} + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_query_by_select_obj(conn, request): + conn = request.getfixturevalue(conn) + # WIP : GH10846 + from sqlalchemy import ( + bindparam, + select, + ) + + iris = iris_table_metadata() + name_select = select(iris).where(iris.c.Name == bindparam("name")) + iris_df = sql.read_sql(name_select, conn, params={"name": "Iris-setosa"}) + all_names = set(iris_df["Name"]) + assert all_names == {"Iris-setosa"} + + +@pytest.mark.parametrize("conn", all_connectable) +def test_column_with_percentage(conn, request): + # GH 37157 + conn_name = conn + if conn_name == "sqlite_buildin": + request.applymarker(pytest.mark.xfail(reason="Not Implemented")) + + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]}) + df.to_sql(name="test_column_percentage", con=conn, index=False) + + res = sql.read_sql_table("test_column_percentage", conn) + + tm.assert_frame_equal(res, df) + + +def test_sql_open_close(test_frame3): + # Test if the IO in the database still work if the connection closed + # between the writing and reading (as in many real situations). + + with tm.ensure_clean() as name: + with closing(sqlite3.connect(name)) as conn: + assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4 + + with closing(sqlite3.connect(name)) as conn: + result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn) + + tm.assert_frame_equal(test_frame3, result) + + +@td.skip_if_installed("sqlalchemy") +def test_con_string_import_error(): + conn = "mysql://root@localhost/pandas" + msg = "Using URI string without sqlalchemy installed" + with pytest.raises(ImportError, match=msg): + sql.read_sql("SELECT * FROM iris", conn) + + +@td.skip_if_installed("sqlalchemy") +def test_con_unknown_dbapi2_class_does_not_error_without_sql_alchemy_installed(): + class MockSqliteConnection: + def __init__(self, *args, **kwargs) -> None: + self.conn = sqlite3.Connection(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self.conn, name) + + def close(self): + self.conn.close() + + with contextlib.closing(MockSqliteConnection(":memory:")) as conn: + with tm.assert_produces_warning(UserWarning): + sql.read_sql("SELECT 1", conn) + + +def test_sqlite_read_sql_delegate(sqlite_buildin_iris): + conn = sqlite_buildin_iris + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + + msg = "Execution failed on sql 'iris': near \"iris\": syntax error" + with pytest.raises(sql.DatabaseError, match=msg): + sql.read_sql("iris", conn) + + +def test_get_schema2(test_frame1): + # without providing a connection object (available for backwards comp) + create_sql = sql.get_schema(test_frame1, "test") + assert "CREATE" in create_sql + + +def test_sqlite_type_mapping(sqlite_buildin): + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + conn = sqlite_buildin + df = 
DataFrame( + {"time": to_datetime(["2014-12-12 01:54", "2014-12-11 02:54"], utc=True)} + ) + db = sql.SQLiteDatabase(conn) + table = sql.SQLiteTable("test_type", db, frame=df) + schema = table.sql_schema() + for col in schema.split("\n"): + if col.split()[0].strip('"') == "time": + assert col.split()[1] == "TIMESTAMP" + + +# ----------------------------------------------------------------------------- +# -- Database flavor specific tests + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_create_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import inspect + + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 + + insp = inspect(conn) + assert insp.has_table("temp_frame") + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("temp_frame") + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_drop_table(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import inspect + + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(temp_frame, "temp_frame") == 4 + + insp = inspect(conn) + assert insp.has_table("temp_frame") + + with pandasSQL.run_transaction(): + pandasSQL.drop_table("temp_frame") + try: + insp.clear_cache() # needed with SQLAlchemy 2.0, unavailable prior + except AttributeError: + pass + assert not insp.has_table("temp_frame") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_roundtrip(conn, request, test_frame1): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn_name = conn + conn = request.getfixturevalue(conn) + pandasSQL = pandasSQL_builder(conn) + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4 + result = pandasSQL.read_query("SELECT * FROM test_frame_roundtrip") + + if "adbc" in conn_name: + result = result.rename(columns={"__index_level_0__": "level_0"}) + result.set_index("level_0", inplace=True) + # result.index.astype(int) + + result.index.name = None + + tm.assert_frame_equal(result, test_frame1) + + +@pytest.mark.parametrize("conn", all_connectable_iris) +def test_execute_sql(conn, request): + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_results = pandasSQL.execute("SELECT * FROM iris") + row = iris_results.fetchone() + iris_results.close() + assert list(row) == [5.1, 3.5, 1.4, 0.2, "Iris-setosa"] + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table("iris", con=conn) + check_iris_frame(iris_frame) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_sqlalchemy_read_table_columns(conn, request): + conn = request.getfixturevalue(conn) + iris_frame = sql.read_sql_table( + "iris", con=conn, columns=["SepalLength", "SepalLength"] + ) + tm.assert_index_equal(iris_frame.columns, Index(["SepalLength", "SepalLength__1"])) + + 
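+# Editorial sketch, not part of the upstream suite: read_sql_table's columns= argument +# builds the SELECT list from the reflected schema (de-duplicating a repeated request +# with a "__<n>" suffix, as asserted above), so hand-writing the projection with +# read_sql_query should give the same frame. Only the sqlite_conn_iris fixture is +# reused from this module; the test name is illustrative. +def test_read_table_columns_vs_query_sketch(sqlite_conn_iris): + frame_table = sql.read_sql_table("iris", con=sqlite_conn_iris, columns=["Name"]) + frame_query = sql.read_sql_query('SELECT "Name" FROM iris', con=sqlite_conn_iris) + # both paths should produce the same projected frame + tm.assert_frame_equal(frame_table, frame_query) + +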
+@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +def test_read_table_absent_raises(conn, request): + conn = request.getfixturevalue(conn) + msg = "Table this_doesnt_exist not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("this_doesnt_exist", con=conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_sqlalchemy_default_type_conversion(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "mysql" in conn_name or "sqlite" in conn_name: + request.applymarker( + pytest.mark.xfail(reason="boolean dtype not inferred properly") + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.FloatCol.dtype.type, np.floating) + assert issubclass(df.IntCol.dtype.type, np.integer) + assert issubclass(df.BoolCol.dtype.type, np.bool_) + + # Int column with NA values stays as float + assert issubclass(df.IntColWithNull.dtype.type, np.floating) + # Bool column with NA values becomes object + assert issubclass(df.BoolColWithNull.dtype.type, object) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_bigint(conn, request): + # int64 should be converted to BigInteger, GH7433 + conn = request.getfixturevalue(conn) + df = DataFrame(data={"i64": [2**62]}) + assert df.to_sql(name="test_bigint", con=conn, index=False) == 1 + result = sql.read_sql_table("test_bigint", conn) + + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_default_date_load(conn, request): + conn_name = conn + if conn_name == "sqlite_str": + pytest.skip("types tables not created in sqlite_str fixture") + elif "sqlite" in conn_name: + request.applymarker( + pytest.mark.xfail(reason="sqlite does not read date properly") + ) + + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +@pytest.mark.parametrize("parse_dates", [None, ["DateColWithTz"]]) +def test_datetime_with_timezone_query(conn, request, parse_dates): + # edge case that converts postgresql datetime with time zone types + # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok + # but should be more natural, so coerce to datetime64[ns] for now + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + + # GH11216 + df = read_sql_query("select * from datetz", conn, parse_dates=parse_dates) + col = df.DateColWithTz + tm.assert_series_equal(col, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_query_chunksize(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + + df = concat( + list(read_sql_query("select * from datetz", conn, chunksize=1)), + ignore_index=True, + ) + col = df.DateColWithTz + tm.assert_series_equal(col, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_table(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + result = sql.read_sql_table("datetz", conn) + tm.assert_frame_equal(result, expected.to_frame()) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_with_timezone_roundtrip(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + # GH 
9086 + # Write datetimetz data to a db and read it back + # For dbs that support timestamps with timezones, should get back UTC; + # otherwise naive data should be returned + expected = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")} + ) + assert expected.to_sql(name="test_datetime_tz", con=conn, index=False) == 3 + + if "postgresql" in conn_name: + # SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC + expected["A"] = expected["A"].dt.tz_convert("UTC") + else: + # Otherwise, timestamps are returned as local, naive + expected["A"] = expected["A"].dt.tz_localize(None) + + result = sql.read_sql_table("test_datetime_tz", conn) + tm.assert_frame_equal(result, expected) + + result = sql.read_sql_query("SELECT * FROM test_datetime_tz", conn) + if "sqlite" in conn_name: + # read_sql_query does not return datetime type like read_sql_table + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_out_of_bounds_datetime(conn, request): + # GH 26761 + conn = request.getfixturevalue(conn) + data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0]) + assert data.to_sql(name="test_datetime_obb", con=conn, index=False) == 1 + result = sql.read_sql_table("test_datetime_obb", conn) + expected = DataFrame([pd.NaT], columns=["date"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_naive_datetimeindex_roundtrip(conn, request): + # GH 23510 + # Ensure that a naive DatetimeIndex isn't converted to UTC + conn = request.getfixturevalue(conn) + dates = date_range("2018-01-01", periods=5, freq="6h")._with_freq(None) + expected = DataFrame({"nums": range(5)}, index=dates) + assert expected.to_sql(name="foo_table", con=conn, index_label="info_date") == 5 + result = sql.read_sql_table("foo_table", conn, index_col="info_date") + # the result index gains a name from the set_index operation; the expected + # index has none, hence check_names=False + tm.assert_frame_equal(result, expected, check_names=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) +def test_date_parsing(conn, request): + # No Parsing + conn_name = conn + conn = request.getfixturevalue(conn) + df = sql.read_sql_table("types", conn) + expected_type = object if "sqlite" in conn_name else np.datetime64 + assert issubclass(df.DateCol.dtype.type, expected_type) + + df = sql.read_sql_table("types", conn, parse_dates=["DateCol"]) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table( + "types", + conn, + parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}}, + ) + assert issubclass(df.DateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates=["IntDateCol"]) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": "s"}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + df = sql.read_sql_table("types", conn, parse_dates={"IntDateCol": {"unit": "s"}}) + assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B":
np.arange(3.0)} + ) + assert df.to_sql(name="test_datetime", con=conn) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + result = result.drop("index", axis=1) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native datetime type + result = sql.read_sql_query("SELECT * FROM test_datetime", conn) + result = result.drop("index", axis=1) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"]) + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_NaT(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame( + {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)} + ) + df.loc[1, "A"] = np.nan + assert df.to_sql(name="test_datetime", con=conn, index=False) == 3 + + # with read_table -> type information from schema used + result = sql.read_sql_table("test_datetime", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native datetime type + result = sql.read_sql_query("SELECT * FROM test_datetime", conn) + if "sqlite" in conn_name: + assert isinstance(result.loc[0, "A"], str) + result["A"] = to_datetime(result["A"], errors="coerce") + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_date(conn, request): + # test support for datetime.date + conn = request.getfixturevalue(conn) + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_table("test_date", conn) + result = res["a"] + expected = to_datetime(df["a"]) + # comes back as datetime64 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_datetime_time(conn, request, sqlite_buildin): + # test support for datetime.time + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"]) + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_table("test_time", conn) + tm.assert_frame_equal(res, df) + + # GH8341 + # first, use the fallback to have the sqlite adapter put in place + sqlite_conn = sqlite_buildin + assert sql.to_sql(df, "test_time2", sqlite_conn, index=False) == 2 + res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) # check if adapter is in place + # then test if sqlalchemy is unaffected by the sqlite adapter + assert sql.to_sql(df, "test_time3", conn, index=False) == 2 + if "sqlite" in conn_name: + res = sql.read_sql_query("SELECT * FROM test_time3", conn) + ref = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(ref, res) + res = sql.read_sql_table("test_time3", conn) + tm.assert_frame_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_mixed_dtype_insert(conn, request): + # see GH6509 + conn = request.getfixturevalue(conn) + s1 = Series(2**25 + 1, dtype=np.int32) + s2 = Series(0.0, dtype=np.float32) + df = DataFrame({"s1": s1, "s2": s2}) + + # write and read again + assert df.to_sql(name="test_read_write", con=conn, index=False) == 1 + df2 = sql.read_sql_table("test_read_write", conn) + +
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_numeric(conn, request): + # NaNs in numeric float column + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_fullcolumn(conn, request): + # full NaN column (numeric float column) + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type info from table -> stays None + df["B"] = df["B"].astype("object") + df["B"] = None + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_nan_string(conn, request): + # NaNs in string column + conn = request.getfixturevalue(conn) + df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]}) + assert df.to_sql(name="test_nan", con=conn, index=False) == 3 + + # NaNs are coming back as None + df.loc[2, "B"] = None + + # with read_table + result = sql.read_sql_table("test_nan", conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query("SELECT * FROM test_nan", conn) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_save_index(conn, request): + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail( + reason="ADBC implementation does not create index", strict=True + ) + ) + conn_name = conn + conn = request.getfixturevalue(conn) + df = DataFrame.from_records( + [(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"] + ) + + tbl_name = "test_to_sql_saves_index" + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(df, tbl_name) == 2 + + if conn_name in {"sqlite_buildin", "sqlite_str"}: + ixs = sql.read_sql_query( + "SELECT * FROM sqlite_master WHERE type = 'index' " + f"AND tbl_name = '{tbl_name}'", + conn, + ) + ix_cols = [] + for ix_name in ixs.name: + ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", conn) + ix_cols.append(ix_info.name.tolist()) + else: + from sqlalchemy import inspect + + insp = inspect(conn) + + ixs = insp.get_indexes(tbl_name) + ix_cols = [i["column_names"] for i in ixs] + + assert ix_cols == [["A"]] + + +@pytest.mark.parametrize("conn", all_connectable) +def test_transactions(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + if conn_name != "sqlite_buildin" and "adbc" not in conn_name: + from sqlalchemy import text + + stmt = text(stmt) + + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction() as trans: + trans.execute(stmt) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_transaction_rollback(conn, request): + conn_name = conn + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with
pandasSQL.run_transaction() as trans: + stmt = "CREATE TABLE test_trans (A INT, B TEXT)" + if "adbc" in conn_name or isinstance(pandasSQL, SQLiteDatabase): + trans.execute(stmt) + else: + from sqlalchemy import text + + stmt = text(stmt) + trans.execute(stmt) + + class DummyException(Exception): + pass + + # Make sure when transaction is rolled back, no rows get inserted + ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')" + if isinstance(pandasSQL, SQLDatabase): + from sqlalchemy import text + + ins_sql = text(ins_sql) + try: + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + raise DummyException("error") + except DummyException: + # ignore raised exception + pass + with pandasSQL.run_transaction(): + res = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res) == 0 + + # Make sure when transaction is committed, rows do get inserted + with pandasSQL.run_transaction() as trans: + trans.execute(ins_sql) + res2 = pandasSQL.read_query("SELECT * FROM test_trans") + assert len(res2) == 1 + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_get_schema_create_table(conn, request, test_frame3): + # Use a dataframe without a bool column, since MySQL converts bool to + # TINYINT (which read_sql_table returns as an int and causes a dtype + # mismatch) + if conn == "sqlite_str": + request.applymarker( + pytest.mark.xfail(reason="test does not support sqlite_str fixture") + ) + + conn = request.getfixturevalue(conn) + + from sqlalchemy import text + from sqlalchemy.engine import Engine + + tbl = "test_get_schema_create_table" + create_sql = sql.get_schema(test_frame3, tbl, con=conn) + blank_test_df = test_frame3.iloc[:0] + + create_sql = text(create_sql) + if isinstance(conn, Engine): + with conn.connect() as newcon: + with newcon.begin(): + newcon.execute(create_sql) + else: + conn.execute(create_sql) + returned_df = sql.read_sql_table(tbl, conn) + tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + TEXT, + String, + ) + from sqlalchemy.schema import MetaData + + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": TEXT}) == 2 + meta = MetaData() + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test2"].columns["B"].type + assert isinstance(sqltype, TEXT) + msg = "The type of B is not a SQLAlchemy type" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": str}) + + # GH9083 + assert df.to_sql(name="dtype_test3", con=conn, dtype={"B": String(10)}) == 2 + meta.reflect(bind=conn) + sqltype = meta.tables["dtype_test3"].columns["B"].type + assert isinstance(sqltype, String) + assert sqltype.length == 10 + + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype=TEXT) == 2 + meta.reflect(bind=conn) + sqltypea = meta.tables["single_dtype_test"].columns["A"].type + sqltypeb = meta.tables["single_dtype_test"].columns["B"].type + assert isinstance(sqltypea, TEXT) + assert isinstance(sqltypeb, TEXT) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_notna_dtype(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection 
system") + + conn_name = conn + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Boolean, + DateTime, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) + + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 + _ = sql.read_sql_table(tbl, conn) + meta = MetaData() + meta.reflect(bind=conn) + my_type = Integer if "mysql" in conn_name else Boolean + col_dict = meta.tables[tbl].columns + assert isinstance(col_dict["Bool"].type, my_type) + assert isinstance(col_dict["Date"].type, DateTime) + assert isinstance(col_dict["Int"].type, Integer) + assert isinstance(col_dict["Float"].type, Float) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_double_precision(conn, request): + if conn == "sqlite_str": + pytest.skip("sqlite_str has no inspection system") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + BigInteger, + Float, + Integer, + ) + from sqlalchemy.schema import MetaData + + V = 1.23456789101112131415 + + df = DataFrame( + { + "f32": Series([V], dtype="float32"), + "f64": Series([V], dtype="float64"), + "f64_as_f32": Series([V], dtype="float64"), + "i32": Series([5], dtype="int32"), + "i64": Series([5], dtype="int64"), + } + ) + + assert ( + df.to_sql( + name="test_dtypes", + con=conn, + index=False, + if_exists="replace", + dtype={"f64_as_f32": Float(precision=23)}, + ) + == 1 + ) + res = sql.read_sql_table("test_dtypes", conn) + + # check precision of float64 + assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14) + + # check sql types + meta = MetaData() + meta.reflect(bind=conn) + col_dict = meta.tables["test_dtypes"].columns + assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type) + assert isinstance(col_dict["f32"].type, Float) + assert isinstance(col_dict["f64"].type, Float) + assert isinstance(col_dict["i32"].type, Integer) + assert isinstance(col_dict["i64"].type, BigInteger) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_connectable_issue_example(conn, request): + conn = request.getfixturevalue(conn) + + # This tests the example raised in issue + # https://github.com/pandas-dev/pandas/issues/10104 + from sqlalchemy.engine import Engine + + def test_select(connection): + query = "SELECT test_foo_data FROM test_foo_data" + return sql.read_sql_query(query, con=connection) + + def test_append(connection, data): + data.to_sql(name="test_foo_data", con=connection, if_exists="append") + + def test_connectable(conn): + # https://github.com/sqlalchemy/sqlalchemy/commit/ + # 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973 + foo_data = test_select(conn) + test_append(conn, foo_data) + + def main(connectable): + if isinstance(connectable, Engine): + with connectable.connect() as conn: + with conn.begin(): + test_connectable(conn) + else: + test_connectable(connectable) + + assert ( + DataFrame({"test_foo_data": [0, 1, 2]}).to_sql(name="test_foo_data", con=conn) + == 3 + ) + main(conn) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize( + "input", + [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], +) +def test_to_sql_with_negative_npinf(conn, request, input): + # GH 34431 + + df = DataFrame(input) + conn_name = conn + conn = request.getfixturevalue(conn) + + if "mysql" in 
conn_name: + # GH 36465 + # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error + # for pymysql version >= 0.10 + # TODO(GH#36465): remove this version check after GH 36465 is fixed + pymysql = pytest.importorskip("pymysql") + + if Version(pymysql.__version__) < Version("1.0.3") and "infe0" in df.columns: + mark = pytest.mark.xfail(reason="GH 36465") + request.applymarker(mark) + + msg = "inf cannot be used with MySQL" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="foobar", con=conn, index=False) + else: + assert df.to_sql(name="foobar", con=conn, index=False) == 1 + res = sql.read_sql_table("foobar", conn) + tm.assert_equal(df, res) + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_temporary_table(conn, request): + if conn == "sqlite_str": + pytest.skip("test does not work with str connection") + + conn = request.getfixturevalue(conn) + + from sqlalchemy import ( + Column, + Integer, + Unicode, + select, + ) + from sqlalchemy.orm import ( + Session, + declarative_base, + ) + + test_data = "Hello, World!" + expected = DataFrame({"spam": [test_data]}) + Base = declarative_base() + + class Temporary(Base): + __tablename__ = "temp_test" + __table_args__ = {"prefixes": ["TEMPORARY"]} + id = Column(Integer, primary_key=True) + spam = Column(Unicode(30), nullable=False) + + with Session(conn) as session: + with session.begin(): + conn = session.connection() + Temporary.__table__.create(conn) + session.add(Temporary(spam=test_data)) + session.flush() + df = sql.read_sql_query(sql=select(Temporary.spam), con=conn) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +def test_invalid_engine(conn, request, test_frame1): + if conn == "sqlite_buildin" or "adbc" in conn: + request.applymarker( + pytest.mark.xfail( + reason="SQLiteDatabase/ADBCDatabase does not raise for bad engine" + ) + ) + + conn = request.getfixturevalue(conn) + msg = "engine must be one of 'auto', 'sqlalchemy'" + with pandasSQL_builder(conn) as pandasSQL: + with pytest.raises(ValueError, match=msg): + pandasSQL.to_sql(test_frame1, "test_frame1", engine="bad_engine") + + +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_with_sql_engine(conn, request, test_frame1): + """`to_sql` with the `engine` param""" + # mostly copied from this class's `_to_sql()` method + conn = request.getfixturevalue(conn) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1", engine="auto") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", sqlalchemy_connectable) +def test_options_sqlalchemy(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + with pd.option_context("io.sql.engine", "sqlalchemy"): + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +@pytest.mark.parametrize("conn", all_connectable) +def test_options_auto(conn, request, test_frame1): + # use the set option + conn = request.getfixturevalue(conn) + with pd.option_context("io.sql.engine", "auto"): + with pandasSQL_builder(conn) as pandasSQL: + with 
pandasSQL.run_transaction(): + assert pandasSQL.to_sql(test_frame1, "test_frame1") == 4 + assert pandasSQL.has_table("test_frame1") + + num_entries = len(test_frame1) + num_rows = count_rows(conn, "test_frame1") + assert num_rows == num_entries + + +def test_options_get_engine(): + pytest.importorskip("sqlalchemy") + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "sqlalchemy"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + with pd.option_context("io.sql.engine", "auto"): + assert isinstance(get_engine("auto"), SQLAlchemyEngine) + assert isinstance(get_engine("sqlalchemy"), SQLAlchemyEngine) + + +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. are installed but bad version + pass + # TODO(GH#36893) fill this in when we add more engines + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype_backend( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)( + f"Select * from {table}", conn, dtype_backend=dtype_backend + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + if "adbc" in conn_name: + # adbc does not support chunksize argument + request.applymarker( + pytest.mark.xfail(reason="adbc does not support chunksize argument") + ) + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + f"Select * from {table}", + con=conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table"]) +def test_read_sql_dtype_backend_table( + conn, + request, + string_storage, + func, + dtype_backend, + dtype_backend_data, + dtype_backend_expected, +): + if "sqlite" in conn and "adbc" not in conn: + request.applymarker( + pytest.mark.xfail( + reason=( + "SQLite actually returns proper boolean values via " + "read_sql_table, but before pytest refactor was skipped" + ) + ) + ) + # GH#50048 + conn_name = conn + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + with pd.option_context("mode.string_storage", string_storage): + result = getattr(pd, func)(table, conn, dtype_backend=dtype_backend) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + tm.assert_frame_equal(result, expected) + + if "adbc" in conn_name: + # adbc does not support chunksize argument + return + + with pd.option_context("mode.string_storage", string_storage): + iterator = getattr(pd, func)( + table, + conn, + dtype_backend=dtype_backend, + chunksize=3, + ) + expected = dtype_backend_expected(string_storage, dtype_backend, conn_name) + for result in iterator: + tm.assert_frame_equal(result, 
expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_table", "read_sql_query"]) +def test_read_sql_invalid_dtype_backend_table(conn, request, func, dtype_backend_data): + conn = request.getfixturevalue(conn) + table = "test" + df = dtype_backend_data + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + getattr(pd, func)(table, conn, dtype_backend="numpy") + + +@pytest.fixture +def dtype_backend_data() -> DataFrame: + return DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + +@pytest.fixture +def dtype_backend_expected(): + def func(storage, dtype_backend, conn_name) -> DataFrame: + string_array: StringArray | ArrowStringArray + string_array_na: StringArray | ArrowStringArray + if storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) # type: ignore[assignment] + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) # type: ignore[assignment] + + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + df = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, pd.NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + + from pandas.arrays import ArrowExtensionArray + + df = DataFrame( + { + col: ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + + if "mysql" in conn_name or "sqlite" in conn_name: + if dtype_backend == "numpy_nullable": + df = df.astype({"e": "Int64", "f": "Int64"}) + else: + df = df.astype({"e": "int64[pyarrow]", "f": "int64[pyarrow]"}) + + return df + + return func + + +@pytest.mark.parametrize("conn", all_connectable) +def test_chunksize_empty_dtypes(conn, request): + # GH#50245 + if "adbc" in conn: + request.node.add_marker( + pytest.mark.xfail(reason="chunksize argument NotImplemented with ADBC") + ) + conn = request.getfixturevalue(conn) + dtypes = {"a": "int64", "b": "object"} + df = DataFrame(columns=["a", "b"]).astype(dtypes) + expected = df.copy() + df.to_sql(name="test", con=conn, index=False, if_exists="replace") + + for result in read_sql_query( + "SELECT * FROM test", + conn, + dtype=dtypes, + chunksize=1, + ): + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("conn", all_connectable) +@pytest.mark.parametrize("dtype_backend", [lib.no_default, "numpy_nullable"]) +@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"]) +def test_read_sql_dtype(conn, 
request, func, dtype_backend): + # GH#50797 + conn = request.getfixturevalue(conn) + table = "test" + df = DataFrame({"a": [1, 2, 3], "b": 5}) + df.to_sql(name=table, con=conn, index=False, if_exists="replace") + + result = getattr(pd, func)( + f"Select * from {table}", + conn, + dtype={"a": np.float64}, + dtype_backend=dtype_backend, + ) + expected = DataFrame( + { + "a": Series([1, 2, 3], dtype=np.float64), + "b": Series( + [5, 5, 5], + dtype="int64" if not dtype_backend == "numpy_nullable" else "Int64", + ), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_keyword_deprecation(sqlite_engine): + conn = sqlite_engine + # GH 54397 + msg = ( + "Starting with pandas version 3.0 all arguments of to_sql except for the " + "arguments 'name' and 'con' will be keyword-only." + ) + df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) + df.to_sql("example", conn) + + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_sql("example", conn, None, if_exists="replace") + + +def test_bigint_warning(sqlite_engine): + conn = sqlite_engine + # test no warning for BIGINT (to support int64) is raised (GH7433) + df = DataFrame({"a": [1, 2]}, dtype="int64") + assert df.to_sql(name="test_bigintwarning", con=conn, index=False) == 2 + + with tm.assert_produces_warning(None): + sql.read_sql_table("test_bigintwarning", conn) + + +def test_valueerror_exception(sqlite_engine): + conn = sqlite_engine + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) + with pytest.raises(ValueError, match="Empty table name specified"): + df.to_sql(name="", con=conn, if_exists="replace", index=False) + + +def test_row_object_is_named_tuple(sqlite_engine): + conn = sqlite_engine + # GH 40682 + # Test for the is_named_tuple() function + # Placed here due to its usage of sqlalchemy + + from sqlalchemy import ( + Column, + Integer, + String, + ) + from sqlalchemy.orm import ( + declarative_base, + sessionmaker, + ) + + BaseModel = declarative_base() + + class Test(BaseModel): + __tablename__ = "test_frame" + id = Column(Integer, primary_key=True) + string_column = Column(String(50)) + + with conn.begin(): + BaseModel.metadata.create_all(conn) + Session = sessionmaker(bind=conn) + with Session() as session: + df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]}) + assert ( + df.to_sql(name="test_frame", con=conn, index=False, if_exists="replace") + == 2 + ) + session.commit() + test_query = session.query(Test.id, Test.string_column) + df = DataFrame(test_query) + + assert list(df.columns) == ["id", "string_column"] + + +def test_read_sql_string_inference(sqlite_engine): + conn = sqlite_engine + # GH#54430 + pytest.importorskip("pyarrow") + table = "test" + df = DataFrame({"a": ["x", "y"]}) + df.to_sql(table, con=conn, index=False, if_exists="replace") + + with pd.option_context("future.infer_string", True): + result = read_sql_table(table, conn) + + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["x", "y"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + + tm.assert_frame_equal(result, expected) + + +def test_roundtripping_datetimes(sqlite_engine): + conn = sqlite_engine + # GH#54877 + df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") + df.to_sql("test", conn, if_exists="replace", index=False) + result = pd.read_sql("select * from test", conn).iloc[0, 0] + assert result == "2020-12-31 12:00:00.000000" + + +@pytest.fixture +def sqlite_builtin_detect_types(): + with contextlib.closing( + sqlite3.connect(":memory:", 
detect_types=sqlite3.PARSE_DECLTYPES) + ) as closing_conn: + with closing_conn as conn: + yield conn + + +def test_roundtripping_datetimes_detect_types(sqlite_builtin_detect_types): + # https://github.com/pandas-dev/pandas/issues/55554 + conn = sqlite_builtin_detect_types + df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") + df.to_sql("test", conn, if_exists="replace", index=False) + result = pd.read_sql("select * from test", conn).iloc[0, 0] + assert result == Timestamp("2020-12-31 12:00:00.000000") + + +@pytest.mark.db +def test_psycopg2_schema_support(postgresql_psycopg2_engine): + conn = postgresql_psycopg2_engine + + # only test this for postgresql (schemas are not supported in + # mysql/sqlite) + df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) + + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") + + # write dataframe to different schemas + assert df.to_sql(name="test_schema_public", con=conn, index=False) == 2 + assert ( + df.to_sql( + name="test_schema_public_explicit", + con=conn, + index=False, + schema="public", + ) + == 2 + ) + assert ( + df.to_sql(name="test_schema_other", con=conn, index=False, schema="other") == 2 + ) + + # read dataframes back in + res1 = sql.read_sql_table("test_schema_public", conn) + tm.assert_frame_equal(df, res1) + res2 = sql.read_sql_table("test_schema_public_explicit", conn) + tm.assert_frame_equal(df, res2) + res3 = sql.read_sql_table("test_schema_public_explicit", conn, schema="public") + tm.assert_frame_equal(df, res3) + res4 = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(df, res4) + msg = "Table test_schema_other not found" + with pytest.raises(ValueError, match=msg): + sql.read_sql_table("test_schema_other", conn, schema="public") + + # different if_exists options + + # create a schema + with conn.connect() as con: + with con.begin(): + con.exec_driver_sql("DROP SCHEMA IF EXISTS other CASCADE;") + con.exec_driver_sql("CREATE SCHEMA other;") + + # write dataframe with different if_exists options + assert ( + df.to_sql(name="test_schema_other", con=conn, schema="other", index=False) == 2 + ) + df.to_sql( + name="test_schema_other", + con=conn, + schema="other", + index=False, + if_exists="replace", + ) + assert ( + df.to_sql( + name="test_schema_other", + con=conn, + schema="other", + index=False, + if_exists="append", + ) + == 2 + ) + res = sql.read_sql_table("test_schema_other", conn, schema="other") + tm.assert_frame_equal(concat([df, df], ignore_index=True), res) + + +@pytest.mark.db +def test_self_join_date_columns(postgresql_psycopg2_engine): + # GH 44421 + conn = postgresql_psycopg2_engine + from sqlalchemy.sql import text + + create_table = text( + """ + CREATE TABLE person + ( + id serial constraint person_pkey primary key, + created_dt timestamp with time zone + ); + + INSERT INTO person + VALUES (1, '2021-01-01T00:00:00Z'); + """ + ) + with conn.connect() as con: + with con.begin(): + con.execute(create_table) + + sql_query = ( + 'SELECT * FROM "person" AS p1 INNER JOIN "person" AS p2 ON p1.id = p2.id;' + ) + result = pd.read_sql(sql_query, conn) + expected = DataFrame( + [[1, Timestamp("2021", tz="UTC")] * 2], columns=["id", "created_dt"] * 2 + ) + tm.assert_frame_equal(result, expected) + + # Cleanup + with sql.SQLDatabase(conn, need_transaction=True) as pandasSQL: + pandasSQL.drop_table("person") + + +def 
test_create_and_drop_table(sqlite_engine): + conn = sqlite_engine + temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) + with sql.SQLDatabase(conn) as pandasSQL: + with pandasSQL.run_transaction(): + assert pandasSQL.to_sql(temp_frame, "drop_test_frame") == 4 + + assert pandasSQL.has_table("drop_test_frame") + + with pandasSQL.run_transaction(): + pandasSQL.drop_table("drop_test_frame") + + assert not pandasSQL.has_table("drop_test_frame") + + +def test_sqlite_datetime_date(sqlite_buildin): + conn = sqlite_buildin + df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) + assert df.to_sql(name="test_date", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_date", conn) + # comes back as strings + tm.assert_frame_equal(res, df.astype(str)) + + +@pytest.mark.parametrize("tz_aware", [False, True]) +def test_sqlite_datetime_time(tz_aware, sqlite_buildin): + conn = sqlite_buildin + # test support for datetime.time, GH #8341 + if not tz_aware: + tz_times = [time(9, 0, 0), time(9, 1, 30)] + else: + tz_dt = date_range("2013-01-01 09:00:00", periods=2, tz="US/Pacific") + tz_times = Series(tz_dt.to_pydatetime()).map(lambda dt: dt.timetz()) + + df = DataFrame(tz_times, columns=["a"]) + + assert df.to_sql(name="test_time", con=conn, index=False) == 2 + res = read_sql_query("SELECT * FROM test_time", conn) + # comes back as strings + expected = df.map(lambda _: _.strftime("%H:%M:%S.%f")) + tm.assert_frame_equal(res, expected) + + +def get_sqlite_column_type(conn, table, column): + recs = conn.execute(f"PRAGMA table_info({table})") + for cid, name, ctype, not_null, default, pk in recs: + if name == column: + return ctype + raise ValueError(f"Table {table}, column {column} not found") + + +def test_sqlite_test_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = ["A", "B"] + data = [(0.8, True), (0.9, None)] + df = DataFrame(data, columns=cols) + assert df.to_sql(name="dtype_test", con=conn) == 2 + assert df.to_sql(name="dtype_test2", con=conn, dtype={"B": "STRING"}) == 2 + + # sqlite stores Boolean values as INTEGER + assert get_sqlite_column_type(conn, "dtype_test", "B") == "INTEGER" + + assert get_sqlite_column_type(conn, "dtype_test2", "B") == "STRING" + msg = r"B \(<class 'bool'>\) not a string" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="error", con=conn, dtype={"B": bool}) + + # single dtype + assert df.to_sql(name="single_dtype_test", con=conn, dtype="STRING") == 2 + assert get_sqlite_column_type(conn, "single_dtype_test", "A") == "STRING" + assert get_sqlite_column_type(conn, "single_dtype_test", "B") == "STRING" + + +def test_sqlite_notna_dtype(sqlite_buildin): + conn = sqlite_buildin + cols = { + "Bool": Series([True, None]), + "Date": Series([datetime(2012, 5, 1), None]), + "Int": Series([1, None], dtype="object"), + "Float": Series([1.1, None]), + } + df = DataFrame(cols) + + tbl = "notna_dtype_test" + assert df.to_sql(name=tbl, con=conn) == 2 + + assert get_sqlite_column_type(conn, tbl, "Bool") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Date") == "TIMESTAMP" + assert get_sqlite_column_type(conn, tbl, "Int") == "INTEGER" + assert get_sqlite_column_type(conn, tbl, "Float") == "REAL" + + +def test_sqlite_illegal_names(sqlite_buildin): + # For sqlite, these should work fine + conn = sqlite_buildin + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + + msg = "Empty table or column name specified" + with pytest.raises(ValueError, match=msg): + df.to_sql(name="", con=conn) + + for ndx, weird_name in 
enumerate( + [ + "test_weird_name]", + "test_weird_name[", + "test_weird_name`", + 'test_weird_name"', + "test_weird_name'", + "_b.test_weird_name_01-30", + '"_b.test_weird_name_01-30"', + "99beginswithnumber", + "12345", + "\xe9", + ] + ): + assert df.to_sql(name=weird_name, con=conn) == 2 + sql.table_exists(weird_name, conn) + + df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) + c_tbl = f"test_weird_col_name{ndx:d}" + assert df2.to_sql(name=c_tbl, con=conn) == 2 + sql.table_exists(c_tbl, conn) + + +def format_query(sql, *args): + _formatters = { + datetime: "'{}'".format, + str: "'{}'".format, + np.str_: "'{}'".format, + bytes: "'{}'".format, + float: "{:.8f}".format, + int: "{:d}".format, + type(None): lambda x: "NULL", + np.float64: "{:.10f}".format, + bool: "'{!s}'".format, + } + processed_args = [] + for arg in args: + if isinstance(arg, float) and isna(arg): + arg = None + + formatter = _formatters[type(arg)] + processed_args.append(formatter(arg)) + + return sql % tuple(processed_args) + + +def tquery(query, con=None): + """Replace removed sql.tquery function""" + with sql.pandasSQL_builder(con) as pandas_sql: + res = pandas_sql.execute(query).fetchall() + return None if res is None else list(res) + + +def test_xsqlite_basic(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 10 + result = sql.read_sql("select * from test_table", sqlite_buildin) + + # HACK! Change this once indexes are handled properly. + result.index = frame.index + + expected = frame + tm.assert_frame_equal(result, expected) + + frame["txt"] = ["a"] * len(frame) + frame2 = frame.copy() + new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10 + frame2["Idx"] = new_idx.copy() + assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 10 + result = sql.read_sql("select * from test_table2", sqlite_buildin, index_col="Idx") + expected = frame.copy() + expected.index = new_idx + expected.index.name = "Idx" + tm.assert_frame_equal(expected, result) + + +def test_xsqlite_write_row_by_row(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + frame.iloc[0, 0] = np.nan + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + for _, row in frame.iterrows(): + fmt_sql = format_query(ins, *row) + tquery(fmt_sql, con=sqlite_buildin) + + sqlite_buildin.commit() + + result = sql.read_sql("select * from test", con=sqlite_buildin) + result.index = frame.index + tm.assert_frame_equal(result, frame, rtol=1e-3) + + +def test_xsqlite_execute(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + create_sql = sql.get_schema(frame, "test") + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + ins = "INSERT INTO test VALUES (?, ?, ?, ?)" + + row = frame.iloc[0] + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute(ins, tuple(row)) + sqlite_buildin.commit() + + result = sql.read_sql("select * from test", sqlite_buildin) + result.index = frame.index[:1] + 
tm.assert_frame_equal(result, frame[:1]) + + +def test_xsqlite_schema(sqlite_buildin): + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + create_sql = sql.get_schema(frame, "test") + lines = create_sql.splitlines() + for line in lines: + tokens = line.split(" ") + if len(tokens) == 2 and tokens[0] == "A": + assert tokens[1] == "DATETIME" + + create_sql = sql.get_schema(frame, "test", keys=["A", "B"]) + lines = create_sql.splitlines() + assert 'PRIMARY KEY ("A", "B")' in create_sql + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + +def test_xsqlite_execute_fail(sqlite_buildin): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + cur = sqlite_buildin.cursor() + cur.execute(create_sql) + + with sql.pandasSQL_builder(sqlite_buildin) as pandas_sql: + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') + pandas_sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)') + + with pytest.raises(sql.DatabaseError, match="Execution failed on sql"): + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 7)') + + +def test_xsqlite_execute_closed_connection(): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + with contextlib.closing(sqlite3.connect(":memory:")) as conn: + cur = conn.cursor() + cur.execute(create_sql) + + with sql.pandasSQL_builder(conn) as pandas_sql: + pandas_sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)') + + msg = "Cannot operate on a closed database." + with pytest.raises(sqlite3.ProgrammingError, match=msg): + tquery("select * from test", con=conn) + + +def test_xsqlite_keyword_as_column_names(sqlite_buildin): + df = DataFrame({"From": np.ones(5)}) + assert sql.to_sql(df, con=sqlite_buildin, name="testkeywords", index=False) == 5 + + +def test_xsqlite_onecolumn_of_integer(sqlite_buildin): + # GH 3628 + # a column_of_integers dataframe should transfer well to sql + + mono_df = DataFrame([1, 2], columns=["c0"]) + assert sql.to_sql(mono_df, con=sqlite_buildin, name="mono_df", index=False) == 2 + # computing the sum via sql + con_x = sqlite_buildin + the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df")) + # it should not fail, and gives 3 ( Issue #3628 ) + assert the_sum == 3 + + result = sql.read_sql("select * from mono_df", con_x) + tm.assert_frame_equal(result, mono_df) + + +def test_xsqlite_if_exists(sqlite_buildin): + df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) + df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) + table_name = "table_if_exists" + sql_select = f"SELECT * FROM {table_name}" + + msg = "'notvalidvalue' is not valid for if_exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="notvalidvalue", + ) + drop_table(table_name, sqlite_buildin) + + # test if_exists='fail' + sql.to_sql( + frame=df_if_exists_1, con=sqlite_buildin, name=table_name, if_exists="fail" + ) + msg = "Table 'table_if_exists' already exists" + with pytest.raises(ValueError, match=msg): + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="fail", + ) + # test if_exists='replace' + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="replace", + index=False, + ) + assert tquery(sql_select, 
con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, + if_exists="replace", + index=False, + ) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(3, "C"), (4, "D"), (5, "E")] + drop_table(table_name, sqlite_buildin) + + # test if_exists='append' + assert ( + sql.to_sql( + frame=df_if_exists_1, + con=sqlite_buildin, + name=table_name, + if_exists="fail", + index=False, + ) + == 2 + ) + assert tquery(sql_select, con=sqlite_buildin) == [(1, "A"), (2, "B")] + assert ( + sql.to_sql( + frame=df_if_exists_2, + con=sqlite_buildin, + name=table_name, + if_exists="append", + index=False, + ) + == 3 + ) + assert tquery(sql_select, con=sqlite_buildin) == [ + (1, "A"), + (2, "B"), + (3, "C"), + (4, "D"), + (5, "E"), + ] + drop_table(table_name, sqlite_buildin) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_stata.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_stata.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd74faa8a3dbbae94f2a8fd79aa23bf677e6220 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_stata.py @@ -0,0 +1,2381 @@ +import bz2 +import datetime as dt +from datetime import datetime +import gzip +import io +import os +import struct +import tarfile +import zipfile + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import CategoricalDtype +import pandas._testing as tm +from pandas.core.frame import ( + DataFrame, + Series, +) + +from pandas.io.parsers import read_csv +from pandas.io.stata import ( + CategoricalConversionWarning, + InvalidColumnName, + PossiblePrecisionLoss, + StataMissingValue, + StataReader, + StataWriter, + StataWriterUTF8, + ValueLabelTypeMismatch, + read_stata, +) + + +@pytest.fixture +def mixed_frame(): + return DataFrame( + { + "a": [1, 2, 3, 4], + "b": [1.0, 3.0, 27.0, 81.0], + "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"], + } + ) + + +@pytest.fixture +def parsed_114(datapath): + dta14_114 = datapath("io", "data", "stata", "stata5_114.dta") + parsed_114 = read_stata(dta14_114, convert_dates=True) + parsed_114.index.name = "index" + return parsed_114 + + +class TestStata: + def read_dta(self, file): + # Legacy default reader configuration + return read_stata(file, convert_dates=True) + + def read_csv(self, file): + return read_csv(file, parse_dates=True) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_empty_dta(self, version): + empty_ds = DataFrame(columns=["unit"]) + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + empty_ds.to_stata(path, write_index=False, version=version) + empty_ds2 = read_stata(path) + tm.assert_frame_equal(empty_ds, empty_ds2) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_empty_dta_with_dtypes(self, version): + # GH 46240 + # Fixing above bug revealed that types are not correctly preserved when + # writing empty DataFrames + empty_df_typed = DataFrame( + { + "i8": np.array([0], dtype=np.int8), + "i16": np.array([0], dtype=np.int16), + "i32": np.array([0], dtype=np.int32), + "i64": np.array([0], dtype=np.int64), + "u8": np.array([0], dtype=np.uint8), + "u16": np.array([0], dtype=np.uint16), + "u32": np.array([0], dtype=np.uint32), + "u64": np.array([0], dtype=np.uint64), + "f32": np.array([0], dtype=np.float32), + "f64": np.array([0], dtype=np.float64), + } + ) + expected 
= empty_df_typed.copy() + # No uint# support. Downcast since values in range for int# + expected["u8"] = expected["u8"].astype(np.int8) + expected["u16"] = expected["u16"].astype(np.int16) + expected["u32"] = expected["u32"].astype(np.int32) + # No int64 supported at all. Downcast since values in range for int32 + expected["u64"] = expected["u64"].astype(np.int32) + expected["i64"] = expected["i64"].astype(np.int32) + + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + empty_df_typed.to_stata(path, write_index=False, version=version) + empty_reread = read_stata(path) + tm.assert_frame_equal(expected, empty_reread) + tm.assert_series_equal(expected.dtypes, empty_reread.dtypes) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_index_col_none(self, version): + df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]}) + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=version) + read_df = read_stata(path) + + assert isinstance(read_df.index, pd.RangeIndex) + expected = df.copy() + expected["a"] = expected["a"].astype(np.int32) + tm.assert_frame_equal(read_df, expected, check_index_type=True) + + @pytest.mark.parametrize("file", ["stata1_114", "stata1_117"]) + def test_read_dta1(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + # Pandas uses np.nan as missing value. + # Thus, all columns will be of type float, regardless of their name. + expected = DataFrame( + [(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], + ) + + # this is an oddity as really the nan should be float64, but + # the casting doesn't fail so need to match stata here + expected["float_miss"] = expected["float_miss"].astype(np.float32) + + tm.assert_frame_equal(parsed, expected) + + def test_read_dta2(self, datapath): + expected = DataFrame.from_records( + [ + ( + datetime(2006, 11, 19, 23, 13, 20), + 1479596223000, + datetime(2010, 1, 20), + datetime(2010, 1, 8), + datetime(2010, 1, 1), + datetime(1974, 7, 1), + datetime(2010, 1, 1), + datetime(2010, 1, 1), + ), + ( + datetime(1959, 12, 31, 20, 3, 20), + -1479590, + datetime(1953, 10, 2), + datetime(1948, 6, 10), + datetime(1955, 1, 1), + datetime(1955, 7, 1), + datetime(1955, 1, 1), + datetime(2, 1, 1), + ), + (pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT), + ], + columns=[ + "datetime_c", + "datetime_big_c", + "date", + "weekly_date", + "monthly_date", + "quarterly_date", + "half_yearly_date", + "yearly_date", + ], + ) + expected["yearly_date"] = expected["yearly_date"].astype("O") + + path1 = datapath("io", "data", "stata", "stata2_114.dta") + path2 = datapath("io", "data", "stata", "stata2_115.dta") + path3 = datapath("io", "data", "stata", "stata2_117.dta") + + with tm.assert_produces_warning(UserWarning): + parsed_114 = self.read_dta(path1) + with tm.assert_produces_warning(UserWarning): + parsed_115 = self.read_dta(path2) + with tm.assert_produces_warning(UserWarning): + parsed_117 = self.read_dta(path3) + # FIXME: don't leave commented-out + # 113 is buggy due to limits of date format support in Stata + # parsed_113 = self.read_dta( + # datapath("io", "data", "stata", "stata2_113.dta") + # ) + + # FIXME: don't leave commented-out + # buggy test because of the NaT comparison on certain platforms + # Format 113 test fails since it does not support tc and tC formats 
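+ # (%tc counts milliseconds since 01jan1960; %tC additionally accounts for leap seconds)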
+ # tm.assert_frame_equal(parsed_113, expected) + tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True) + tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True) + tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True) + + @pytest.mark.parametrize( + "file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"] + ) + def test_read_dta3(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + # match stata here + expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) + expected = expected.astype(np.float32) + expected["year"] = expected["year"].astype(np.int16) + expected["quarter"] = expected["quarter"].astype(np.int8) + + tm.assert_frame_equal(parsed, expected) + + @pytest.mark.parametrize( + "file", ["stata4_113", "stata4_114", "stata4_115", "stata4_117"] + ) + def test_read_dta4(self, file, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + expected = DataFrame.from_records( + [ + ["one", "ten", "one", "one", "one"], + ["two", "nine", "two", "two", "two"], + ["three", "eight", "three", "three", "three"], + ["four", "seven", 4, "four", "four"], + ["five", "six", 5, np.nan, "five"], + ["six", "five", 6, np.nan, "six"], + ["seven", "four", 7, np.nan, "seven"], + ["eight", "three", 8, np.nan, "eight"], + ["nine", "two", 9, np.nan, "nine"], + ["ten", "one", "ten", np.nan, "ten"], + ], + columns=[ + "fully_labeled", + "fully_labeled2", + "incompletely_labeled", + "labeled_with_missings", + "float_labelled", + ], + ) + + # these are all categoricals + for col in expected: + orig = expected[col].copy() + + categories = np.asarray(expected["fully_labeled"][orig.notna()]) + if col == "incompletely_labeled": + categories = orig + + cat = orig.astype("category")._values + cat = cat.set_categories(categories, ordered=True) + cat.categories.rename(None, inplace=True) + + expected[col] = cat + + # stata doesn't save .category metadata + tm.assert_frame_equal(parsed, expected) + + # File containing strls + def test_read_dta12(self, datapath): + parsed_117 = self.read_dta(datapath("io", "data", "stata", "stata12_117.dta")) + expected = DataFrame.from_records( + [ + [1, "abc", "abcdefghi"], + [3, "cba", "qwertywertyqwerty"], + [93, "", "strl"], + ], + columns=["x", "y", "z"], + ) + + tm.assert_frame_equal(parsed_117, expected, check_dtype=False) + + def test_read_dta18(self, datapath): + parsed_118 = self.read_dta(datapath("io", "data", "stata", "stata14_118.dta")) + parsed_118["Bytes"] = parsed_118["Bytes"].astype("O") + expected = DataFrame.from_records( + [ + ["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0], + ["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan], + ["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0], + ["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4], # noqa: RUF001 + ["", "", "", 0, 0.3332999, "option a", 1 / 3.0], + ], + columns=[ + "Things", + "Cities", + "Unicode_Cities_Strl", + "Ints", + "Floats", + "Bytes", + "Longs", + ], + ) + expected["Floats"] = expected["Floats"].astype(np.float32) + for col in parsed_118.columns: + tm.assert_almost_equal(parsed_118[col], expected[col]) + + with StataReader(datapath("io", "data", "stata", "stata14_118.dta")) as rdr: + vl = rdr.variable_labels() + vl_expected = { + "Unicode_Cities_Strl": "Here are some strls with Ünicode chars", + "Longs": "long data", + "Things": "Here are some things", + "Bytes": "byte data", + "Ints": "int data", + 
"Cities": "Here are some cities", + "Floats": "float data", + } + tm.assert_dict_equal(vl, vl_expected) + + assert rdr.data_label == "This is a Ünicode data label" + + def test_read_write_dta5(self): + original = DataFrame( + [(np.nan, np.nan, np.nan, np.nan, np.nan)], + columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"], + ) + original.index.name = "index" + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=None) + written_and_read_again = self.read_dta(path) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_write_dta6(self, datapath): + original = self.read_csv(datapath("io", "data", "stata", "stata3.csv")) + original.index.name = "index" + original.index = original.index.astype(np.int32) + original["year"] = original["year"].astype(np.int32) + original["quarter"] = original["quarter"].astype(np.int32) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=None) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_write_dta10(self, version): + original = DataFrame( + data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]], + columns=["string", "object", "integer", "floating", "datetime"], + ) + original["object"] = Series(original["object"], dtype=object) + original.index.name = "index" + original.index = original.index.astype(np.int32) + original["integer"] = original["integer"].astype(np.int32) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates={"datetime": "tc"}, version=version) + written_and_read_again = self.read_dta(path) + # original.index is np.int32, read index is np.int64 + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + + def test_stata_doc_examples(self): + with tm.ensure_clean() as path: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.to_stata(path) + + def test_write_preserves_original(self): + # 9795 + + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") + ) + df.loc[2, "a":"c"] = np.nan + df_copy = df.copy() + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False) + tm.assert_frame_equal(df, df_copy) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_encoding(self, version, datapath): + # GH 4626, proper encoding handling + raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) + encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta")) + result = encoded.kreis1849[0] + + expected = raw.kreis1849[0] + assert result == expected + assert isinstance(result, str) + + with tm.ensure_clean() as path: + encoded.to_stata(path, write_index=False, version=version) + reread_encoded = read_stata(path) + tm.assert_frame_equal(encoded, reread_encoded) + + def test_read_write_dta11(self): + original = DataFrame( + [(1, 2, 3, 4)], + columns=[ + "good", + "b\u00E4d", + "8number", + "astringwithmorethan32characters______", + ], + ) + formatted = DataFrame( + [(1, 2, 3, 4)], + columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"], + ) + formatted.index.name = "index" + formatted = formatted.astype(np.int32) + + with tm.ensure_clean() 
as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates=None) + + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_read_write_dta12(self, version): + original = DataFrame( + [(1, 2, 3, 4, 5, 6)], + columns=[ + "astringwithmorethan32characters_1", + "astringwithmorethan32characters_2", + "+", + "-", + "short", + "delete", + ], + ) + formatted = DataFrame( + [(1, 2, 3, 4, 5, 6)], + columns=[ + "astringwithmorethan32characters_", + "_0astringwithmorethan32character", + "_", + "_1_", + "_short", + "_delete", + ], + ) + formatted.index.name = "index" + formatted = formatted.astype(np.int32) + + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates=None, version=version) + # should get a warning for that format. + + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_read_write_dta13(self): + s1 = Series(2**9, dtype=np.int16) + s2 = Series(2**17, dtype=np.int32) + s3 = Series(2**33, dtype=np.int64) + original = DataFrame({"int16": s1, "int32": s2, "int64": s3}) + original.index.name = "index" + + formatted = original + formatted["int64"] = formatted["int64"].astype(np.float64) + + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = formatted.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + @pytest.mark.parametrize( + "file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"] + ) + def test_read_write_reread_dta14(self, file, parsed_114, version, datapath): + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + parsed.index.name = "index" + + tm.assert_frame_equal(parsed_114, parsed) + + with tm.ensure_clean() as path: + parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version) + written_and_read_again = self.read_dta(path) + + expected = parsed_114.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize( + "file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"] + ) + def test_read_write_reread_dta15(self, file, datapath): + expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv")) + expected["byte_"] = expected["byte_"].astype(np.int8) + expected["int_"] = expected["int_"].astype(np.int16) + expected["long_"] = expected["long_"].astype(np.int32) + expected["float_"] = expected["float_"].astype(np.float32) + expected["double_"] = expected["double_"].astype(np.float64) + expected["date_td"] = expected["date_td"].apply( + datetime.strptime, args=("%Y-%m-%d",) + ) + + file = datapath("io", "data", "stata", f"{file}.dta") + parsed = self.read_dta(file) + + tm.assert_frame_equal(expected, parsed) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_timestamp_and_label(self, version): + original = DataFrame([(1,)], columns=["variable"]) + time_stamp = 
datetime(2000, 2, 29, 14, 21) + data_label = "This is a data file." + with tm.ensure_clean() as path: + original.to_stata( + path, time_stamp=time_stamp, data_label=data_label, version=version + ) + + with StataReader(path) as reader: + assert reader.time_stamp == "29 Feb 2000 14:21" + assert reader.data_label == data_label + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_timestamp(self, version): + original = DataFrame([(1,)], columns=["variable"]) + time_stamp = "01 Jan 2000, 00:00:00" + with tm.ensure_clean() as path: + msg = "time_stamp should be datetime type" + with pytest.raises(ValueError, match=msg): + original.to_stata(path, time_stamp=time_stamp, version=version) + assert not os.path.isfile(path) + + def test_numeric_column_names(self): + original = DataFrame(np.reshape(np.arange(25.0), (5, 5))) + original.index.name = "index" + with tm.ensure_clean() as path: + # should get a warning for that format. + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path) + + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + columns = list(written_and_read_again.columns) + convert_col_name = lambda x: int(x[1]) + written_and_read_again.columns = map(convert_col_name, columns) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(expected, written_and_read_again) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_nan_to_missing_value(self, version): + s1 = Series(np.arange(4.0), dtype=np.float32) + s2 = Series(np.arange(4.0), dtype=np.float64) + s1[::2] = np.nan + s2[1::2] = np.nan + original = DataFrame({"s1": s1, "s2": s2}) + original.index.name = "index" + + with tm.ensure_clean() as path: + original.to_stata(path, version=version) + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again, expected) + + def test_no_index(self): + columns = ["x", "y"] + original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns) + original.index.name = "index_not_written" + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + written_and_read_again = self.read_dta(path) + with pytest.raises(KeyError, match=original.index.name): + written_and_read_again["index_not_written"] + + def test_string_no_dates(self): + s1 = Series(["a", "A longer string"]) + s2 = Series([1.0, 2.0], dtype=np.float64) + original = DataFrame({"s1": s1, "s2": s2}) + original.index.name = "index" + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_large_value_conversion(self): + s0 = Series([1, 99], dtype=np.int8) + s1 = Series([1, 127], dtype=np.int8) + s2 = Series([1, 2**15 - 1], dtype=np.int16) + s3 = Series([1, 2**63 - 1], dtype=np.int64) + original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3}) + original.index.name = "index" + with tm.ensure_clean() as path: + with tm.assert_produces_warning(PossiblePrecisionLoss): + original.to_stata(path) + + written_and_read_again = self.read_dta(path) + + modified = original.copy() + modified["s1"] = Series(modified["s1"], dtype=np.int16) 
+ modified["s2"] = Series(modified["s2"], dtype=np.int32) + modified["s3"] = Series(modified["s3"], dtype=np.float64) + modified.index = original.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) + + def test_dates_invalid_column(self): + original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) + original.index.name = "index" + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + original.to_stata(path, convert_dates={0: "tc"}) + + written_and_read_again = self.read_dta(path) + + modified = original.copy() + modified.columns = ["_0"] + modified.index = original.index.astype(np.int32) + tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) + + def test_105(self, datapath): + # Data obtained from: + # http://go.worldbank.org/ZXY29PVJ21 + dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") + df = read_stata(dpath) + df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]] + df0 = DataFrame(df0) + df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"] + df0["clustnum"] = df0["clustnum"].astype(np.int16) + df0["pri_schl"] = df0["pri_schl"].astype(np.int8) + df0["psch_num"] = df0["psch_num"].astype(np.int8) + df0["psch_dis"] = df0["psch_dis"].astype(np.float32) + tm.assert_frame_equal(df.head(3), df0) + + def test_value_labels_old_format(self, datapath): + # GH 19417 + # + # Test that value_labels() returns an empty dict if the file format + # predates supporting value labels. + dpath = datapath("io", "data", "stata", "S4_EDUC1.dta") + with StataReader(dpath) as reader: + assert reader.value_labels() == {} + + def test_date_export_formats(self): + columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"] + conversions = {c: c for c in columns} + data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns) + original = DataFrame([data], columns=columns) + original.index.name = "index" + expected_values = [ + datetime(2006, 11, 20, 23, 13, 20), # Time + datetime(2006, 11, 20), # Day + datetime(2006, 11, 19), # Week + datetime(2006, 11, 1), # Month + datetime(2006, 10, 1), # Quarter year + datetime(2006, 7, 1), # Half year + datetime(2006, 1, 1), + ] # Year + + expected = DataFrame( + [expected_values], + index=pd.Index([0], dtype=np.int32, name="index"), + columns=columns, + ) + + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates=conversions) + written_and_read_again = self.read_dta(path) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + def test_write_missing_strings(self): + original = DataFrame([["1"], [None]], columns=["foo"]) + + expected = DataFrame( + [["1"], [""]], + index=pd.Index([0, 1], dtype=np.int32, name="index"), + columns=["foo"], + ) + + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + @pytest.mark.parametrize("byteorder", [">", "<"]) + def test_bool_uint(self, byteorder, version): + s0 = Series([0, 1, True], dtype=np.bool_) + s1 = Series([0, 1, 100], dtype=np.uint8) + s2 = Series([0, 1, 255], dtype=np.uint8) + s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16) + s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16) + s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32) + s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32) + + original = DataFrame( + {"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6} + ) + 
original.index.name = "index" + expected = original.copy() + expected.index = original.index.astype(np.int32) + expected_types = ( + np.int8, + np.int8, + np.int16, + np.int16, + np.int32, + np.int32, + np.float64, + ) + for c, t in zip(expected.columns, expected_types): + expected[c] = expected[c].astype(t) + + with tm.ensure_clean() as path: + original.to_stata(path, byteorder=byteorder, version=version) + written_and_read_again = self.read_dta(path) + + written_and_read_again = written_and_read_again.set_index("index") + tm.assert_frame_equal(written_and_read_again, expected) + + def test_variable_labels(self, datapath): + with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr: + sr_115 = rdr.variable_labels() + with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr: + sr_117 = rdr.variable_labels() + keys = ("var1", "var2", "var3") + labels = ("label1", "label2", "label3") + for k, v in sr_115.items(): + assert k in sr_117 + assert v == sr_117[k] + assert k in keys + assert v in labels + + def test_minimal_size_col(self): + str_lens = (1, 100, 244) + s = {} + for str_len in str_lens: + s["s" + str(str_len)] = Series( + ["a" * str_len, "b" * str_len, "c" * str_len] + ) + original = DataFrame(s) + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + + with StataReader(path) as sr: + sr._ensure_open() # The `_*list` variables are initialized here + for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist): + assert int(variable[1:]) == int(fmt[1:-1]) + assert int(variable[1:]) == typ + + def test_excessively_long_string(self): + str_lens = (1, 244, 500) + s = {} + for str_len in str_lens: + s["s" + str(str_len)] = Series( + ["a" * str_len, "b" * str_len, "c" * str_len] + ) + original = DataFrame(s) + msg = ( + r"Fixed width strings in Stata \.dta files are limited to 244 " + r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy " + r"this restriction\. Use the\n'version=117' parameter to write " + r"the newer \(Stata 13 and later\) format\." + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_missing_value_generator(self): + types = ("b", "h", "l") + df = DataFrame([[0.0]], columns=["float_"]) + with tm.ensure_clean() as path: + df.to_stata(path) + with StataReader(path) as rdr: + valid_range = rdr.VALID_RANGE + expected_values = ["." + chr(97 + i) for i in range(26)] + expected_values.insert(0, ".") + for t in types: + offset = valid_range[t][1] + for i in range(27): + val = StataMissingValue(offset + 1 + i) + assert val.string == expected_values[i] + + # Test extremes for floats + val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]) + assert val.string == "." + + @staticmethod + def _convert_categorical(from_frame: DataFrame) -> DataFrame: + """ + Emulate the categorical casting behavior we expect from roundtripping. + """ 
+ """ + for col in from_frame: + ser = from_frame[col] + if isinstance(ser.dtype, CategoricalDtype): + cat = ser._values.remove_unused_categories() + if cat.categories.dtype == object: + categories = pd.Index._with_infer(cat.categories._values) + cat = cat.set_categories(categories) + from_frame[col] = cat + return from_frame + + def test_iterator(self, datapath): + fname = datapath("io", "data", "stata", "stata3_117.dta") + + parsed = read_stata(fname) + + with read_stata(fname, iterator=True) as itr: + chunk = itr.read(5) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + with read_stata(fname, chunksize=5) as itr: + chunk = list(itr) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0]) + + with read_stata(fname, iterator=True) as itr: + chunk = itr.get_chunk(5) + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + with read_stata(fname, chunksize=5) as itr: + chunk = itr.get_chunk() + tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) + + # GH12153 + with read_stata(fname, chunksize=4) as itr: + from_chunks = pd.concat(itr) + tm.assert_frame_equal(parsed, from_chunks) + + @pytest.mark.filterwarnings("ignore::UserWarning") + @pytest.mark.parametrize( + "file", + [ + "stata2_115", + "stata3_115", + "stata4_115", + "stata5_115", + "stata6_115", + "stata7_115", + "stata8_115", + "stata9_115", + "stata10_115", + "stata11_115", + ], + ) + @pytest.mark.parametrize("chunksize", [1, 2]) + @pytest.mark.parametrize("convert_categoricals", [False, True]) + @pytest.mark.parametrize("convert_dates", [False, True]) + def test_read_chunks_115( + self, file, chunksize, convert_categoricals, convert_dates, datapath + ): + fname = datapath("io", "data", "stata", f"{file}.dta") + + # Read the whole file + parsed = read_stata( + fname, + convert_categoricals=convert_categoricals, + convert_dates=convert_dates, + ) + + # Compare to what we get when reading by chunk + with read_stata( + fname, + iterator=True, + convert_dates=convert_dates, + convert_categoricals=convert_categoricals, + ) as itr: + pos = 0 + for j in range(5): + try: + chunk = itr.read(chunksize) + except StopIteration: + break + from_frame = parsed.iloc[pos : pos + chunksize, :].copy() + from_frame = self._convert_categorical(from_frame) + tm.assert_frame_equal( + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True + ) + pos += chunksize + + def test_read_chunks_columns(self, datapath): + fname = datapath("io", "data", "stata", "stata3_117.dta") + columns = ["quarter", "cpi", "m1"] + chunksize = 2 + + parsed = read_stata(fname, columns=columns) + with read_stata(fname, iterator=True) as itr: + pos = 0 + for j in range(5): + chunk = itr.read(chunksize, columns=columns) + if chunk is None: + break + from_frame = parsed.iloc[pos : pos + chunksize, :] + tm.assert_frame_equal(from_frame, chunk, check_dtype=False) + pos += chunksize + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_write_variable_labels(self, version, mixed_frame): + # GH 13631, add support for writing variable labels + mixed_frame.index.name = "index" + variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"} + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) + with StataReader(path) as sr: + read_labels = sr.variable_labels() + expected_labels = { + "index": "", + "a": "City Rank", + "b": "City Exponent", + "c": "City", + } + assert read_labels == expected_labels + + variable_labels["index"] = "The Index" + with tm.ensure_clean() as path: + 
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version) + with StataReader(path) as sr: + read_labels = sr.variable_labels() + assert read_labels == variable_labels + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_variable_labels(self, version, mixed_frame): + mixed_frame.index.name = "index" + variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} + with tm.ensure_clean() as path: + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): + mixed_frame.to_stata( + path, variable_labels=variable_labels, version=version + ) + + @pytest.mark.parametrize("version", [114, 117]) + def test_invalid_variable_label_encoding(self, version, mixed_frame): + mixed_frame.index.name = "index" + variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"} + variable_labels["a"] = "invalid character Œ" + with tm.ensure_clean() as path: + with pytest.raises( + ValueError, match="Variable labels must contain only characters" + ): + mixed_frame.to_stata( + path, variable_labels=variable_labels, version=version + ) + + def test_write_variable_label_errors(self, mixed_frame): + values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"] + + variable_labels_utf8 = { + "a": "City Rank", + "b": "City Exponent", + "c": "".join(values), + } + + msg = ( + "Variable labels must contain only characters that can be " + "encoded in Latin-1" + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels_utf8) + + variable_labels_long = { + "a": "City Rank", + "b": "City Exponent", + "c": "A very, very, very long variable label " + "that is too long for Stata which means " + "that it has more than 80 characters", + } + + msg = "Variable labels must be 80 characters or fewer" + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + mixed_frame.to_stata(path, variable_labels=variable_labels_long) + + def test_default_date_conversion(self): + # GH 12259 + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + reread = read_stata(path, convert_dates=True) + tm.assert_frame_equal(original, reread) + + original.to_stata(path, write_index=False, convert_dates={"dates": "tc"}) + direct = read_stata(path, convert_dates=True) + tm.assert_frame_equal(reread, direct) + + dates_idx = original.columns.tolist().index("dates") + original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"}) + direct = read_stata(path, convert_dates=True) + tm.assert_frame_equal(reread, direct) + + def test_unsupported_type(self): + original = DataFrame({"a": [1 + 2j, 2 + 4j]}) + + msg = "Data type complex128 not supported" + with pytest.raises(NotImplementedError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_unsupported_datetype(self): + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + msg = "Format %tC not implemented" + with 
pytest.raises(NotImplementedError, match=msg): + with tm.ensure_clean() as path: + original.to_stata(path, convert_dates={"dates": "tC"}) + + dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong") + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + with pytest.raises(NotImplementedError, match="Data type datetime64"): + with tm.ensure_clean() as path: + original.to_stata(path) + + def test_repeated_column_labels(self, datapath): + # GH 13923, 25772 + msg = """ +Value labels for column ethnicsn are not unique. These cannot be converted to +pandas categoricals. + +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. + +The repeated labels are:\n-+\nwolof +""" + with pytest.raises(ValueError, match=msg): + read_stata( + datapath("io", "data", "stata", "stata15.dta"), + convert_categoricals=True, + ) + + def test_stata_111(self, datapath): + # 111 is an old version but still used by current versions of + # SAS when exporting to Stata format. We do not know of any + # on-line documentation for this version. + df = read_stata(datapath("io", "data", "stata", "stata7_111.dta")) + original = DataFrame( + { + "y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0], + "x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6], + "w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3], + "z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"], + } + ) + original = original[["y", "x", "w", "z"]] + tm.assert_frame_equal(original, df) + + def test_out_of_range_double(self): + # GH 14618 + df = DataFrame( + { + "ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307], + "ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max], + } + ) + msg = ( + r"Column ColumnTooBig has a maximum value \(.+\) outside the range " + r"supported by Stata \(.+\)" + ) + with pytest.raises(ValueError, match=msg): + with tm.ensure_clean() as path: + df.to_stata(path) + + def test_out_of_range_float(self): + original = DataFrame( + { + "ColumnOk": [ + 0.0, + np.finfo(np.float32).eps, + np.finfo(np.float32).max / 10.0, + ], + "ColumnTooBig": [ + 0.0, + np.finfo(np.float32).eps, + np.finfo(np.float32).max, + ], + } + ) + original.index.name = "index" + for col in original: + original[col] = original[col].astype(np.float32) + + with tm.ensure_clean() as path: + original.to_stata(path) + reread = read_stata(path) + + original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64) + expected = original.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(reread.set_index("index"), expected) + + @pytest.mark.parametrize("infval", [np.inf, -np.inf]) + def test_inf(self, infval): + # GH 45350 + df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]}) + msg = ( + "Column WithInf contains infinity or -infinity" + "which is outside the range supported by Stata." 
+        )
+        with pytest.raises(ValueError, match=msg):
+            with tm.ensure_clean() as path:
+                df.to_stata(path)
+
+    def test_path_pathlib(self):
+        df = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=pd.Index(list("ABCD"), dtype=object),
+            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        df.index.name = "index"
+        reader = lambda x: read_stata(x).set_index("index")
+        result = tm.round_trip_pathlib(df.to_stata, reader)
+        tm.assert_frame_equal(df, result)
+
+    def test_pickle_path_localpath(self):
+        df = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=pd.Index(list("ABCD"), dtype=object),
+            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        df.index.name = "index"
+        reader = lambda x: read_stata(x).set_index("index")
+        result = tm.round_trip_localpath(df.to_stata, reader)
+        tm.assert_frame_equal(df, result)
+
+    @pytest.mark.parametrize("write_index", [True, False])
+    def test_value_labels_iterator(self, write_index):
+        # GH 16923
+        d = {"A": ["B", "E", "C", "A", "E"]}
+        df = DataFrame(data=d)
+        df["A"] = df["A"].astype("category")
+        with tm.ensure_clean() as path:
+            df.to_stata(path, write_index=write_index)
+
+            with read_stata(path, iterator=True) as dta_iter:
+                value_labels = dta_iter.value_labels()
+        assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}
+
+    def test_set_index(self):
+        # GH 17328
+        df = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=pd.Index(list("ABCD"), dtype=object),
+            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        df.index.name = "index"
+        with tm.ensure_clean() as path:
+            df.to_stata(path)
+            reread = read_stata(path, index_col="index")
+        tm.assert_frame_equal(df, reread)
+
+    @pytest.mark.parametrize(
+        "column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
+    )
+    def test_date_parsing_ignores_format_details(self, column, datapath):
+        # GH 17797
+        #
+        # Test that display formats are ignored when determining if a numeric
+        # column is a date value.
+        #
+        # All date types are stored as numbers and the format associated with
+        # the column denotes both the type of the date and the display format.
+        #
+        # Stata supports 9 date types, each with distinct units. We test 7
+        # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
+        # accounts for leap seconds and %tb relies on Stata's business calendar.
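+        #
+        # Editor's sketch of the storage model described above: a Stata date
+        # is a plain number counted from the epoch 1960-01-01, and the %t*
+        # format only fixes the unit and the rendering. For a %td (day)
+        # column the stored value is days since the epoch:
+        assert (datetime(2006, 11, 20) - datetime(1960, 1, 1)).days == 17125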
+ df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta")) + unformatted = df.loc[0, column] + formatted = df.loc[0, column + "_fmt"] + assert unformatted == formatted + + def test_writer_117(self): + original = DataFrame( + data=[ + [ + "string", + "object", + 1, + 1, + 1, + 1.1, + 1.1, + np.datetime64("2003-12-25"), + "a", + "a" * 2045, + "a" * 5000, + "a", + ], + [ + "string-1", + "object-1", + 1, + 1, + 1, + 1.1, + 1.1, + np.datetime64("2003-12-26"), + "b", + "b" * 2045, + "", + "", + ], + ], + columns=[ + "string", + "object", + "int8", + "int16", + "int32", + "float32", + "float64", + "datetime", + "s1", + "s2045", + "srtl", + "forced_strl", + ], + ) + original["object"] = Series(original["object"], dtype=object) + original["int8"] = Series(original["int8"], dtype=np.int8) + original["int16"] = Series(original["int16"], dtype=np.int16) + original["int32"] = original["int32"].astype(np.int32) + original["float32"] = Series(original["float32"], dtype=np.float32) + original.index.name = "index" + original.index = original.index.astype(np.int32) + copy = original.copy() + with tm.ensure_clean() as path: + original.to_stata( + path, + convert_dates={"datetime": "tc"}, + convert_strl=["forced_strl"], + version=117, + ) + written_and_read_again = self.read_dta(path) + # original.index is np.int32, read index is np.int64 + tm.assert_frame_equal( + written_and_read_again.set_index("index"), + original, + check_index_type=False, + ) + tm.assert_frame_equal(original, copy) + + def test_convert_strl_name_swap(self): + original = DataFrame( + [["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]], + columns=["long1" * 10, "long", 1], + ) + original.index.name = "index" + + with tm.assert_produces_warning(InvalidColumnName): + with tm.ensure_clean() as path: + original.to_stata(path, convert_strl=["long", 1], version=117) + reread = self.read_dta(path) + reread = reread.set_index("index") + reread.columns = original.columns + tm.assert_frame_equal(reread, original, check_index_type=False) + + def test_invalid_date_conversion(self): + # GH 12259 + dates = [ + dt.datetime(1999, 12, 31, 12, 12, 12, 12000), + dt.datetime(2012, 12, 21, 12, 21, 12, 21000), + dt.datetime(1776, 7, 4, 7, 4, 7, 4000), + ] + original = DataFrame( + { + "nums": [1.0, 2.0, 3.0], + "strs": ["apple", "banana", "cherry"], + "dates": dates, + } + ) + + with tm.ensure_clean() as path: + msg = "convert_dates key must be a column or an integer" + with pytest.raises(ValueError, match=msg): + original.to_stata(path, convert_dates={"wrong_name": "tc"}) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_nonfile_writing(self, version): + # GH 21041 + bio = io.BytesIO() + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + with tm.ensure_clean() as path: + df.to_stata(bio, version=version) + bio.seek(0) + with open(path, "wb") as dta: + dta.write(bio.read()) + reread = read_stata(path, index_col="index") + tm.assert_frame_equal(df, reread) + + def test_gzip_writing(self): + # writing version 117 requires seek and cannot be used with gzip + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=pd.Index(list("ABCD"), dtype=object), + index=pd.Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "index" + with tm.ensure_clean() as path: + with gzip.GzipFile(path, "wb") as gz: + df.to_stata(gz, version=114) + with 
gzip.GzipFile(path, "rb") as gz: + reread = read_stata(gz, index_col="index") + tm.assert_frame_equal(df, reread) + + def test_unicode_dta_118(self, datapath): + unicode_df = self.read_dta(datapath("io", "data", "stata", "stata16_118.dta")) + + columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"] + values = [ + ["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"], + ["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"], + ["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"], + [" ", " ", "d", " ", "d"], + [" ", "", "a", " ", "a"], + ["", "", "s", "", "s"], + ["", "", " ", "", " "], + ] + expected = DataFrame(values, columns=columns) + + tm.assert_frame_equal(unicode_df, expected) + + def test_mixed_string_strl(self): + # GH 23633 + output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}] + output = DataFrame(output) + output.number = output.number.astype("int32") + + with tm.ensure_clean() as path: + output.to_stata(path, write_index=False, version=117) + reread = read_stata(path) + expected = output.fillna("") + tm.assert_frame_equal(reread, expected) + + # Check strl supports all None (null) + output["mixed"] = None + output.to_stata( + path, write_index=False, convert_strl=["mixed"], version=117 + ) + reread = read_stata(path) + expected = output.fillna("") + tm.assert_frame_equal(reread, expected) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_all_none_exception(self, version): + output = [{"none": "none", "number": 0}, {"none": None, "number": 1}] + output = DataFrame(output) + output["none"] = None + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="Column `none` cannot be exported"): + output.to_stata(path, version=version) + + @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) + def test_invalid_file_not_written(self, version): + content = "Here is one __�__ Another one __·__ Another one __½__" + df = DataFrame([content], columns=["invalid"]) + with tm.ensure_clean() as path: + msg1 = ( + r"'latin-1' codec can't encode character '\\ufffd' " + r"in position 14: ordinal not in range\(256\)" + ) + msg2 = ( + "'ascii' codec can't decode byte 0xef in position 14: " + r"ordinal not in range\(128\)" + ) + with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"): + df.to_stata(path) + + def test_strl_latin1(self): + # GH 23573, correct GSO data to reflect correct size + output = DataFrame( + [["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"] + ) + + with tm.ensure_clean() as path: + output.to_stata(path, version=117, convert_strl=["var_strl"]) + with open(path, "rb") as reread: + content = reread.read() + expected = "þâÑÐŧ" + assert expected.encode("latin-1") in content + assert expected.encode("utf-8") in content + gsos = content.split(b"strls")[1][1:-2] + for gso in gsos.split(b"GSO")[1:]: + val = gso.split(b"\x00")[-2] + size = gso[gso.find(b"\x82") + 1] + assert len(val) == size - 1 + + def test_encoding_latin1_118(self, datapath): + # GH 25960 + msg = """ +One or more strings in the dta file could not be decoded using utf-8, and +so the fallback encoding of latin-1 is being used. This can happen when a file +has been incorrectly encoded by Stata or some other software. 
You should verify +the string values returned are correct.""" + # Move path outside of read_stata, or else assert_produces_warning + # will block pytests skip mechanism from triggering (failing the test) + # if the path is not present + path = datapath("io", "data", "stata", "stata1_encoding_118.dta") + with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w: + encoded = read_stata(path) + # with filter_level="always", produces 151 warnings which can be slow + assert len(w) == 1 + assert w[0].message.args[0] == msg + + expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"]) + tm.assert_frame_equal(encoded, expected) + + @pytest.mark.slow + def test_stata_119(self, datapath): + # Gzipped since contains 32,999 variables and uncompressed is 20MiB + # Just validate that the reader reports correct number of variables + # to avoid high peak memory + with gzip.open( + datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb" + ) as gz: + with StataReader(gz) as reader: + reader._ensure_open() + assert reader._nvar == 32999 + + @pytest.mark.parametrize("version", [118, 119, None]) + def test_utf8_writer(self, version): + cat = pd.Categorical(["a", "β", "ĉ"], ordered=True) + data = DataFrame( + [ + [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"], + [2.0, 2, "ᴮ", ""], + [3.0, 3, "ᴰ", None], + ], + columns=["Å", "β", "ĉ", "strls"], + ) + data["ᴐᴬᵀ"] = cat + variable_labels = { + "Å": "apple", + "β": "ᵈᵉᵊ", + "ĉ": "ᴎტჄႲႳႴႶႺ", + "strls": "Long Strings", + "ᴐᴬᵀ": "", + } + data_label = "ᴅaᵀa-label" + value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}} + data["β"] = data["β"].astype(np.int32) + with tm.ensure_clean() as path: + writer = StataWriterUTF8( + path, + data, + data_label=data_label, + convert_strl=["strls"], + variable_labels=variable_labels, + write_index=False, + version=version, + value_labels=value_labels, + ) + writer.write_file() + reread_encoded = read_stata(path) + # Missing is intentionally converted to empty strl + data["strls"] = data["strls"].fillna("") + # Variable with value labels is reread as categorical + data["β"] = ( + data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered() + ) + tm.assert_frame_equal(data, reread_encoded) + with StataReader(path) as reader: + assert reader.data_label == data_label + assert reader.variable_labels() == variable_labels + + data.to_stata(path, version=version, write_index=False) + reread_to_stata = read_stata(path) + tm.assert_frame_equal(data, reread_to_stata) + + def test_writer_118_exceptions(self): + df = DataFrame(np.zeros((1, 33000), dtype=np.int8)) + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="version must be either 118 or 119."): + StataWriterUTF8(path, df, version=117) + with tm.ensure_clean() as path: + with pytest.raises(ValueError, match="You must use version 119"): + StataWriterUTF8(path, df, version=118) + + @pytest.mark.parametrize( + "dtype_backend", + ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))], + ) + def test_read_write_ea_dtypes(self, dtype_backend): + df = DataFrame( + { + "a": [1, 2, None], + "b": ["a", "b", "c"], + "c": [True, False, None], + "d": [1.5, 2.5, 3.5], + "e": pd.date_range("2020-12-31", periods=3, freq="D"), + }, + index=pd.Index([0, 1, 2], name="index"), + ) + df = df.convert_dtypes(dtype_backend=dtype_backend) + df.to_stata("test_stata.dta", version=118) + + with tm.ensure_clean() as path: + df.to_stata(path) + written_and_read_again = self.read_dta(path) + + expected = DataFrame( + { + "a": 
[1, 2, np.nan], + "b": ["a", "b", "c"], + "c": [1.0, 0, np.nan], + "d": [1.5, 2.5, 3.5], + "e": pd.date_range("2020-12-31", periods=3, freq="D"), + }, + index=pd.Index([0, 1, 2], name="index", dtype=np.int32), + ) + + tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) + + +@pytest.mark.parametrize("version", [105, 108, 111, 113, 114]) +def test_backward_compat(version, datapath): + data_base = datapath("io", "data", "stata") + ref = os.path.join(data_base, "stata-compat-118.dta") + old = os.path.join(data_base, f"stata-compat-{version}.dta") + expected = read_stata(ref) + old_dta = read_stata(old) + tm.assert_frame_equal(old_dta, expected, check_dtype=False) + + +def test_direct_read(datapath, monkeypatch): + file_path = datapath("io", "data", "stata", "stata-compat-118.dta") + + # Test that opening a file path doesn't buffer the file. + with StataReader(file_path) as reader: + # Must not have been buffered to memory + assert not reader.read().empty + assert not isinstance(reader._path_or_buf, io.BytesIO) + + # Test that we use a given fp exactly, if possible. + with open(file_path, "rb") as fp: + with StataReader(fp) as reader: + assert not reader.read().empty + assert reader._path_or_buf is fp + + # Test that we use a given BytesIO exactly, if possible. + with open(file_path, "rb") as fp: + with io.BytesIO(fp.read()) as bio: + with StataReader(bio) as reader: + assert not reader.read().empty + assert reader._path_or_buf is bio + + +def test_statareader_warns_when_used_without_context(datapath): + file_path = datapath("io", "data", "stata", "stata-compat-118.dta") + with tm.assert_produces_warning( + ResourceWarning, + match="without using a context manager", + ): + sr = StataReader(file_path) + sr.read() + with tm.assert_produces_warning( + FutureWarning, + match="is not part of the public API", + ): + sr.close() + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +@pytest.mark.parametrize("use_dict", [True, False]) +@pytest.mark.parametrize("infer", [True, False]) +def test_compression(compression, version, use_dict, infer, compression_to_extension): + file_name = "dta_inferred_compression.dta" + if compression: + if use_dict: + file_ext = compression + else: + file_ext = compression_to_extension[compression] + file_name += f".{file_ext}" + compression_arg = compression + if infer: + compression_arg = "infer" + if use_dict: + compression_arg = {"method": compression} + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.index.name = "index" + with tm.ensure_clean(file_name) as path: + df.to_stata(path, version=version, compression=compression_arg) + if compression == "gzip": + with gzip.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "zip": + with zipfile.ZipFile(path, "r") as comp: + fp = io.BytesIO(comp.read(comp.filelist[0])) + elif compression == "tar": + with tarfile.open(path) as tar: + fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read()) + elif compression == "bz2": + with bz2.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "zstd": + zstd = pytest.importorskip("zstandard") + with zstd.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression == "xz": + lzma = pytest.importorskip("lzma") + with lzma.open(path, "rb") as comp: + fp = io.BytesIO(comp.read()) + elif compression is None: + fp = path + reread = read_stata(fp, index_col="index") + + expected = df.copy() + expected.index = 
expected.index.astype(np.int32) + tm.assert_frame_equal(reread, expected) + + +@pytest.mark.parametrize("method", ["zip", "infer"]) +@pytest.mark.parametrize("file_ext", [None, "dta", "zip"]) +def test_compression_dict(method, file_ext): + file_name = f"test.{file_ext}" + archive_name = "test.dta" + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df.index.name = "index" + with tm.ensure_clean(file_name) as path: + compression = {"method": method, "archive_name": archive_name} + df.to_stata(path, compression=compression) + if method == "zip" or file_ext == "zip": + with zipfile.ZipFile(path, "r") as zp: + assert len(zp.filelist) == 1 + assert zp.filelist[0].filename == archive_name + fp = io.BytesIO(zp.read(zp.filelist[0])) + else: + fp = path + reread = read_stata(fp, index_col="index") + + expected = df.copy() + expected.index = expected.index.astype(np.int32) + tm.assert_frame_equal(reread, expected) + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +def test_chunked_categorical(version): + df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")}) + df.index.name = "index" + + expected = df.copy() + expected.index = expected.index.astype(np.int32) + + with tm.ensure_clean() as path: + df.to_stata(path, version=version) + with StataReader(path, chunksize=2, order_categoricals=False) as reader: + for i, block in enumerate(reader): + block = block.set_index("index") + assert "cats" in block + tm.assert_series_equal( + block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)] + ) + + +def test_chunked_categorical_partial(datapath): + dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") + values = ["a", "b", "a", "b", 3.0] + with StataReader(dta_file, chunksize=2) as reader: + with tm.assert_produces_warning(CategoricalConversionWarning): + for i, block in enumerate(reader): + assert list(block.cats) == values[2 * i : 2 * (i + 1)] + if i < 2: + idx = pd.Index(["a", "b"]) + else: + idx = pd.Index([3.0], dtype="float64") + tm.assert_index_equal(block.cats.cat.categories, idx) + with tm.assert_produces_warning(CategoricalConversionWarning): + with StataReader(dta_file, chunksize=5) as reader: + large_chunk = reader.__next__() + direct = read_stata(dta_file) + tm.assert_frame_equal(direct, large_chunk) + + +@pytest.mark.parametrize("chunksize", (-1, 0, "apple")) +def test_iterator_errors(datapath, chunksize): + dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") + with pytest.raises(ValueError, match="chunksize must be a positive"): + with StataReader(dta_file, chunksize=chunksize): + pass + + +def test_iterator_value_labels(): + # GH 31544 + values = ["c_label", "b_label"] + ["a_label"] * 500 + df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)}) + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False) + expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object") + with read_stata(path, chunksize=100) as reader: + for j, chunk in enumerate(reader): + for i in range(2): + tm.assert_index_equal(chunk.dtypes.iloc[i].categories, expected) + tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100]) + + +def test_precision_loss(): + df = DataFrame( + [[sum(2**i for i in range(60)), sum(2**i for i in range(52))]], + columns=["big", "little"], + ) + with tm.ensure_clean() as path: + with tm.assert_produces_warning( + PossiblePrecisionLoss, match="Column converted from int64 to float64" + ): + df.to_stata(path, 
write_index=False) + reread = read_stata(path) + expected_dt = Series([np.float64, np.float64], index=["big", "little"]) + tm.assert_series_equal(reread.dtypes, expected_dt) + assert reread.loc[0, "little"] == df.loc[0, "little"] + assert reread.loc[0, "big"] == float(df.loc[0, "big"]) + + +def test_compression_roundtrip(compression): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.index.name = "index" + + with tm.ensure_clean() as path: + df.to_stata(path, compression=compression) + reread = read_stata(path, compression=compression, index_col="index") + tm.assert_frame_equal(df, reread) + + # explicitly ensure file was compressed. + with tm.decompress_file(path, compression) as fh: + contents = io.BytesIO(fh.read()) + reread = read_stata(contents, index_col="index") + tm.assert_frame_equal(df, reread) + + +@pytest.mark.parametrize("to_infer", [True, False]) +@pytest.mark.parametrize("read_infer", [True, False]) +def test_stata_compression( + compression_only, read_infer, to_infer, compression_to_extension +): + compression = compression_only + + ext = compression_to_extension[compression] + filename = f"test.{ext}" + + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.index.name = "index" + + to_compression = "infer" if to_infer else compression + read_compression = "infer" if read_infer else compression + + with tm.ensure_clean(filename) as path: + df.to_stata(path, compression=to_compression) + result = read_stata(path, compression=read_compression, index_col="index") + tm.assert_frame_equal(result, df) + + +def test_non_categorical_value_labels(): + data = DataFrame( + { + "fully_labelled": [1, 2, 3, 3, 1], + "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan], + "Y": [7, 7, 9, 8, 10], + "Z": pd.Categorical(["j", "k", "l", "k", "j"]), + } + ) + + with tm.ensure_clean() as path: + value_labels = { + "fully_labelled": {1: "one", 2: "two", 3: "three"}, + "partially_labelled": {1.0: "one", 2.0: "two"}, + } + expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}} + + writer = StataWriter(path, data, value_labels=value_labels) + writer.write_file() + + with StataReader(path) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == expected + + msg = "Can't create value labels for notY, it wasn't found in the dataset." + with pytest.raises(KeyError, match=msg): + value_labels = {"notY": {7: "label1", 8: "label2"}} + StataWriter(path, data, value_labels=value_labels) + + msg = ( + "Can't create value labels for Z, value labels " + "can only be applied to numeric columns." 
+ ) + with pytest.raises(ValueError, match=msg): + value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}} + StataWriter(path, data, value_labels=value_labels) + + +def test_non_categorical_value_label_name_conversion(): + # Check conversion of invalid variable names + data = DataFrame( + { + "invalid~!": [1, 1, 2, 3, 5, 8], # Only alphanumeric and _ + "6_invalid": [1, 1, 2, 3, 5, 8], # Must start with letter or _ + "invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8], # Too long + "aggregate": [2, 5, 5, 6, 6, 9], # Reserved words + (1, 2): [1, 2, 3, 4, 5, 6], # Hashable non-string + } + ) + + value_labels = { + "invalid~!": {1: "label1", 2: "label2"}, + "6_invalid": {1: "label1", 2: "label2"}, + "invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"}, + "aggregate": {5: "five"}, + (1, 2): {3: "three"}, + } + + expected = { + "invalid__": {1: "label1", 2: "label2"}, + "_6_invalid": {1: "label1", 2: "label2"}, + "invalid_name_longer_than_32_char": {8: "eight", 9: "nine"}, + "_aggregate": {5: "five"}, + "_1__2_": {3: "three"}, + } + + with tm.ensure_clean() as path: + with tm.assert_produces_warning(InvalidColumnName): + data.to_stata(path, value_labels=value_labels) + + with StataReader(path) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == expected + + +def test_non_categorical_value_label_convert_categoricals_error(): + # Mapping more than one value to the same label is valid for Stata + # labels, but can't be read with convert_categoricals=True + value_labels = { + "repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"} + } + + data = DataFrame( + { + "repeated_labels": [10, 10, 20, 20, 40, 40], + } + ) + + with tm.ensure_clean() as path: + data.to_stata(path, value_labels=value_labels) + + with StataReader(path, convert_categoricals=False) as reader: + reader_value_labels = reader.value_labels() + assert reader_value_labels == value_labels + + col = "repeated_labels" + repeats = "-" * 80 + "\n" + "\n".join(["More than ten"]) + + msg = f""" +Value labels for column {col} are not unique. These cannot be converted to +pandas categoricals. + +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. 
+ +The repeated labels are: +{repeats} +""" + with pytest.raises(ValueError, match=msg): + read_stata(path, convert_categoricals=True) + + +@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) +@pytest.mark.parametrize( + "dtype", + [ + pd.BooleanDtype, + pd.Int8Dtype, + pd.Int16Dtype, + pd.Int32Dtype, + pd.Int64Dtype, + pd.UInt8Dtype, + pd.UInt16Dtype, + pd.UInt32Dtype, + pd.UInt64Dtype, + ], +) +def test_nullable_support(dtype, version): + df = DataFrame( + { + "a": Series([1.0, 2.0, 3.0]), + "b": Series([1, pd.NA, pd.NA], dtype=dtype.name), + "c": Series(["a", "b", None]), + } + ) + dtype_name = df.b.dtype.numpy_dtype.name + # Only use supported names: no uint, bool or int64 + dtype_name = dtype_name.replace("u", "") + if dtype_name == "int64": + dtype_name = "int32" + elif dtype_name == "bool": + dtype_name = "int8" + value = StataMissingValue.BASE_MISSING_VALUES[dtype_name] + smv = StataMissingValue(value) + expected_b = Series([1, smv, smv], dtype=object, name="b") + expected_c = Series(["a", "b", ""], name="c") + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=version) + reread = read_stata(path, convert_missing=True) + tm.assert_series_equal(df.a, reread.a) + tm.assert_series_equal(reread.b, expected_b) + tm.assert_series_equal(reread.c, expected_c) + + +def test_empty_frame(): + # GH 46240 + # create an empty DataFrame with int64 and float64 dtypes + df = DataFrame(data={"a": range(3), "b": [1.0, 2.0, 3.0]}).head(0) + with tm.ensure_clean() as path: + df.to_stata(path, write_index=False, version=117) + # Read entire dataframe + df2 = read_stata(path) + assert "b" in df2 + # Dtypes don't match since no support for int32 + dtypes = Series({"a": np.dtype("int32"), "b": np.dtype("float64")}) + tm.assert_series_equal(df2.dtypes, dtypes) + # read one column of empty .dta file + df3 = read_stata(path, columns=["a"]) + assert "b" not in df3 + tm.assert_series_equal(df3.dtypes, dtypes.loc[["a"]]) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e298adbce74c365af678d5912857dc175b088489 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc0465ba76064d1ffee5a621e1aec6a2cc126130 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38b14962b481aafeaa7b04b2c70224d3c107b784 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad73131ca7a1c507f9c81d4a46c86fe4aa4744be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..247b03f13549ae90e0f5b0cb114478ca41b85ec2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0acfc6abfb53b5e6cfe836aabb3b0de30ce7c9c0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..676525b1a0c00ccd61ae0f2400b20912a8fc7d8b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fccbee668c83e87b70ae34fb9e6329ee9554810b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f644f609b9512d663aa8f0b7988f968d00abe489 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25959563aa626e1faed4e752bf0a576cee282d3d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81d682c50f57259a0ddf02b8e30d089a7749f7ba Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f6c1fad0431dc2b9c71c671c297d23995daa6ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71a02c6be70714fef1ace6a25a1a285eb71dbf62 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/common.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/common.py new file mode 100644 index 0000000000000000000000000000000000000000..69120160699c24cc86670522f84ec6c7014c20ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/common.py @@ -0,0 +1,563 @@ +""" +Module consolidating common testing functions for checking plotting. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import Series +import pandas._testing as tm + +if TYPE_CHECKING: + from collections.abc import Sequence + + from matplotlib.axes import Axes + + +def _check_legend_labels(axes, labels=None, visible=True): + """ + Check each axes has expected legend labels + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + labels : list-like + expected legend labels + visible : bool + expected legend visibility. labels are checked only when visible is + True + """ + if visible and (labels is None): + raise ValueError("labels must be specified when visible is True") + axes = _flatten_visible(axes) + for ax in axes: + if visible: + assert ax.get_legend() is not None + _check_text_labels(ax.get_legend().get_texts(), labels) + else: + assert ax.get_legend() is None + + +def _check_legend_marker(ax, expected_markers=None, visible=True): + """ + Check ax has expected legend markers + + Parameters + ---------- + ax : matplotlib Axes object + expected_markers : list-like + expected legend markers + visible : bool + expected legend visibility. 
labels are checked only when visible is
+        True
+    """
+    if visible and (expected_markers is None):
+        raise ValueError("Markers must be specified when visible is True")
+    if visible:
+        handles, _ = ax.get_legend_handles_labels()
+        markers = [handle.get_marker() for handle in handles]
+        assert markers == expected_markers
+    else:
+        assert ax.get_legend() is None
+
+
+def _check_data(xp, rs):
+    """
+    Check each axes has identical lines
+
+    Parameters
+    ----------
+    xp : matplotlib Axes object
+    rs : matplotlib Axes object
+    """
+    import matplotlib.pyplot as plt
+
+    xp_lines = xp.get_lines()
+    rs_lines = rs.get_lines()
+
+    assert len(xp_lines) == len(rs_lines)
+    for xpl, rsl in zip(xp_lines, rs_lines):
+        xpdata = xpl.get_xydata()
+        rsdata = rsl.get_xydata()
+        tm.assert_almost_equal(xpdata, rsdata)
+
+    plt.close("all")
+
+
+def _check_visible(collections, visible=True):
+    """
+    Check each artist is visible or not
+
+    Parameters
+    ----------
+    collections : matplotlib Artist or its list-like
+        target Artist or its list or collection
+    visible : bool
+        expected visibility
+    """
+    from matplotlib.collections import Collection
+
+    if not isinstance(collections, Collection) and not is_list_like(collections):
+        collections = [collections]
+
+    for patch in collections:
+        assert patch.get_visible() == visible
+
+
+def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None:
+    """
+    Check for each artist whether it is filled or not
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    filled : bool
+        expected filling
+    """
+
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        for patch in ax.patches:
+            assert patch.fill == filled
+
+
+def _get_colors_mapped(series, colors):
+    unique = series.unique()
+    # unique and colors lengths can differ, depending on the slice value
+    mapped = dict(zip(unique, colors))
+    return [mapped[v] for v in series.values]
+
+
+def _check_colors(collections, linecolors=None, facecolors=None, mapping=None):
+    """
+    Check each artist has expected line colors and face colors
+
+    Parameters
+    ----------
+    collections : list-like
+        list or collection of target artist
+    linecolors : list-like which has the same length as collections
+        list of expected line colors
+    facecolors : list-like which has the same length as collections
+        list of expected face colors
+    mapping : Series
+        Series used for color grouping key
+        used for andrews_curves, parallel_coordinates, radviz test
+    """
+    from matplotlib import colors
+    from matplotlib.collections import (
+        Collection,
+        LineCollection,
+        PolyCollection,
+    )
+    from matplotlib.lines import Line2D
+
+    conv = colors.ColorConverter
+    if linecolors is not None:
+        if mapping is not None:
+            linecolors = _get_colors_mapped(mapping, linecolors)
+            linecolors = linecolors[: len(collections)]
+
+        assert len(collections) == len(linecolors)
+        for patch, color in zip(collections, linecolors):
+            if isinstance(patch, Line2D):
+                result = patch.get_color()
+                # Line2D may contain a string color expression
+                result = conv.to_rgba(result)
+            elif isinstance(patch, (PolyCollection, LineCollection)):
+                result = tuple(patch.get_edgecolor()[0])
+            else:
+                result = patch.get_edgecolor()
+
+            expected = conv.to_rgba(color)
+            assert result == expected
+
+    if facecolors is not None:
+        if mapping is not None:
+            facecolors = _get_colors_mapped(mapping, facecolors)
+            facecolors = facecolors[: len(collections)]
+
+        assert len(collections) == len(facecolors)
+        for patch, color in zip(collections, facecolors):
+            if
isinstance(patch, Collection): + # returned as list of np.array + result = patch.get_facecolor()[0] + else: + result = patch.get_facecolor() + + if isinstance(result, np.ndarray): + result = tuple(result) + + expected = conv.to_rgba(color) + assert result == expected + + +def _check_text_labels(texts, expected): + """ + Check each text has expected labels + + Parameters + ---------- + texts : matplotlib Text object, or its list-like + target text, or its list + expected : str or list-like which has the same length as texts + expected text label, or its list + """ + if not is_list_like(texts): + assert texts.get_text() == expected + else: + labels = [t.get_text() for t in texts] + assert len(labels) == len(expected) + for label, e in zip(labels, expected): + assert label == e + + +def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): + """ + Check each axes has expected tick properties + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xlabelsize : number + expected xticks font size + xrot : number + expected xticks rotation + ylabelsize : number + expected yticks font size + yrot : number + expected yticks rotation + """ + from matplotlib.ticker import NullFormatter + + axes = _flatten_visible(axes) + for ax in axes: + if xlabelsize is not None or xrot is not None: + if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter): + # If minor ticks has NullFormatter, rot / fontsize are not + # retained + labels = ax.get_xticklabels() + else: + labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True) + + for label in labels: + if xlabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), xlabelsize) + if xrot is not None: + tm.assert_almost_equal(label.get_rotation(), xrot) + + if ylabelsize is not None or yrot is not None: + if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter): + labels = ax.get_yticklabels() + else: + labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True) + + for label in labels: + if ylabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), ylabelsize) + if yrot is not None: + tm.assert_almost_equal(label.get_rotation(), yrot) + + +def _check_ax_scales(axes, xaxis="linear", yaxis="linear"): + """ + Check each axes has expected scales + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xaxis : {'linear', 'log'} + expected xaxis scale + yaxis : {'linear', 'log'} + expected yaxis scale + """ + axes = _flatten_visible(axes) + for ax in axes: + assert ax.xaxis.get_scale() == xaxis + assert ax.yaxis.get_scale() == yaxis + + +def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None): + """ + Check expected number of axes is drawn in expected layout + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + axes_num : number + expected number of axes. Unnecessary axes should be set to + invisible. + layout : tuple + expected layout, (expected number of rows , columns) + figsize : tuple + expected figsize. 
default is matplotlib default + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + if figsize is None: + figsize = (6.4, 4.8) + visible_axes = _flatten_visible(axes) + + if axes_num is not None: + assert len(visible_axes) == axes_num + for ax in visible_axes: + # check something drawn on visible axes + assert len(ax.get_children()) > 0 + + if layout is not None: + x_set = set() + y_set = set() + for ax in flatten_axes(axes): + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + result = (len(y_set), len(x_set)) + assert result == layout + + tm.assert_numpy_array_equal( + visible_axes[0].figure.get_size_inches(), + np.array(figsize, dtype=np.float64), + ) + + +def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]: + """ + Flatten axes, and filter only visible + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + axes_ndarray = flatten_axes(axes) + axes = [ax for ax in axes_ndarray if ax.get_visible()] + return axes + + +def _check_has_errorbars(axes, xerr=0, yerr=0): + """ + Check axes has expected number of errorbars + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xerr : number + expected number of x errorbar + yerr : number + expected number of y errorbar + """ + axes = _flatten_visible(axes) + for ax in axes: + containers = ax.containers + xerr_count = 0 + yerr_count = 0 + for c in containers: + has_xerr = getattr(c, "has_xerr", False) + has_yerr = getattr(c, "has_yerr", False) + if has_xerr: + xerr_count += 1 + if has_yerr: + yerr_count += 1 + assert xerr == xerr_count + assert yerr == yerr_count + + +def _check_box_return_type( + returned, return_type, expected_keys=None, check_ax_title=True +): + """ + Check box returned type is correct + + Parameters + ---------- + returned : object to be tested, returned from boxplot + return_type : str + return_type passed to boxplot + expected_keys : list-like, optional + group labels in subplot case. If not passed, + the function checks assuming boxplot uses single ax + check_ax_title : bool + Whether to check the ax.title is the same as expected_key + Intended to be checked by calling from ``boxplot``. + Normal ``plot`` doesn't attach ``ax.title``, it must be disabled. 
+ """ + from matplotlib.axes import Axes + + types = {"dict": dict, "axes": Axes, "both": tuple} + if expected_keys is None: + # should be fixed when the returning default is changed + if return_type is None: + return_type = "dict" + + assert isinstance(returned, types[return_type]) + if return_type == "both": + assert isinstance(returned.ax, Axes) + assert isinstance(returned.lines, dict) + else: + # should be fixed when the returning default is changed + if return_type is None: + for r in _flatten_visible(returned): + assert isinstance(r, Axes) + return + + assert isinstance(returned, Series) + + assert sorted(returned.keys()) == sorted(expected_keys) + for key, value in returned.items(): + assert isinstance(value, types[return_type]) + # check returned dict has correct mapping + if return_type == "axes": + if check_ax_title: + assert value.get_title() == key + elif return_type == "both": + if check_ax_title: + assert value.ax.get_title() == key + assert isinstance(value.ax, Axes) + assert isinstance(value.lines, dict) + elif return_type == "dict": + line = value["medians"][0] + axes = line.axes + if check_ax_title: + assert axes.get_title() == key + else: + raise AssertionError + + +def _check_grid_settings(obj, kinds, kws={}): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + + import matplotlib as mpl + + def is_grid_on(): + xticks = mpl.pyplot.gca().xaxis.get_major_ticks() + yticks = mpl.pyplot.gca().yaxis.get_major_ticks() + xoff = all(not g.gridline.get_visible() for g in xticks) + yoff = all(not g.gridline.get_visible() for g in yticks) + + return not (xoff and yoff) + + spndx = 1 + for kind in kinds: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, grid=False, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + if kind not in ["pie", "hexbin", "scatter"]: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, grid=True, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + +def _unpack_cycler(rcParams, field="color"): + """ + Auxiliary function for correctly unpacking cycler after MPL >= 1.5 + """ + return [v[field] for v in rcParams["axes.prop_cycle"]] + + +def get_x_axis(ax): + return ax._shared_axes["x"] + + +def get_y_axis(ax): + return ax._shared_axes["y"] + + +def _check_plot_works(f, default_axes=False, **kwargs): + """ + Create plot and ensure that plot return object is valid. + + Parameters + ---------- + f : func + Plotting function. + default_axes : bool, optional + If False (default): + - If `ax` not in `kwargs`, then create subplot(211) and plot there + - Create new subplot(212) and plot there as well + - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`) + If True: + - Simply run plotting function with kwargs provided + - All required axes instances will be created automatically + - It is recommended to use it when the plotting function + creates multiple axes itself. It helps avoid warnings like + 'UserWarning: To output multiple subplots, + the figure containing the passed axes is being cleared' + **kwargs + Keyword arguments passed to the plotting function. 
+ + Returns + ------- + Plot object returned by the last plotting. + """ + import matplotlib.pyplot as plt + + if default_axes: + gen_plots = _gen_default_plot + else: + gen_plots = _gen_two_subplots + + ret = None + try: + fig = kwargs.get("figure", plt.gcf()) + plt.clf() + + for ret in gen_plots(f, fig, **kwargs): + tm.assert_is_valid_plot_return_object(ret) + + finally: + plt.close(fig) + + return ret + + +def _gen_default_plot(f, fig, **kwargs): + """ + Create plot in a default way. + """ + yield f(**kwargs) + + +def _gen_two_subplots(f, fig, **kwargs): + """ + Create plot on two subplots forcefully created. + """ + if "ax" not in kwargs: + fig.add_subplot(211) + yield f(**kwargs) + + if f is pd.plotting.bootstrap_plot: + assert "ax" not in kwargs + else: + kwargs["ax"] = fig.add_subplot(212) + yield f(**kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..d688bbd47595c2ec6451bd9ddf7c916275013384 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py @@ -0,0 +1,56 @@ +import gc + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + to_datetime, +) + + +@pytest.fixture(autouse=True) +def mpl_cleanup(): + # matplotlib/testing/decorators.py#L24 + # 1) Resets units registry + # 2) Resets rc_context + # 3) Closes all figures + mpl = pytest.importorskip("matplotlib") + mpl_units = pytest.importorskip("matplotlib.units") + plt = pytest.importorskip("matplotlib.pyplot") + orig_units_registry = mpl_units.registry.copy() + with mpl.rc_context(): + mpl.use("template") + yield + mpl_units.registry.clear() + mpl_units.registry.update(orig_units_registry) + plt.close("all") + # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 + gc.collect(1) + + +@pytest.fixture +def hist_df(): + n = 50 + rng = np.random.default_rng(10) + gender = rng.choice(["Male", "Female"], size=n) + classroom = rng.choice(["A", "B", "C"], size=n) + + hist_df = DataFrame( + { + "gender": gender, + "classroom": classroom, + "height": rng.normal(66, 4, size=n), + "weight": rng.normal(161, 32, size=n), + "category": rng.integers(4, size=n), + "datetime": to_datetime( + rng.integers( + 812419200000000000, + 819331200000000000, + size=n, + dtype=np.int64, + ) + ), + } + ) + return hist_df diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..433897ab3975b1f2370cb2bb0785add127b78f86 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d930e72d473154a96c967b2198f9afe4b3840924 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a6fe6ee6a7c7c32799628c73d13773bc16e8c3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70aa74d1484c727d0a7b6e49756f58badec2798f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c1e822e64a8543f499963bc1e1700639ee90693 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4de4c939723737467af49054e7cb957906198c89 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295e8041b932cd58698d6676c85e6f913ef1ff30 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..45dc612148f40ea29c7fac46b6b9d8edd29b17fb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py @@ -0,0 +1,2592 @@ +""" Test cases for DataFrame.plot """ +from datetime import ( + date, + datetime, +) +import gc +import itertools +import re +import string +import weakref + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + 
PeriodIndex, + Series, + bdate_range, + date_range, + option_context, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_box_return_type, + _check_colors, + _check_data, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _check_visible, + get_y_axis, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlots: + @pytest.mark.slow + def test_plot(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _check_plot_works(df.plot, grid=False) + + @pytest.mark.slow + def test_plot_subplots(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # _check_plot_works adds an ax so use default_axes=True to avoid warning + axes = _check_plot_works(df.plot, default_axes=True, subplots=True) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.slow + def test_plot_subplots_negative_layout(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + layout=(-1, 2), + ) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_plot_subplots_use_index(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + use_index=False, + ) + _check_ticks_props(axes, xrot=0) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + @pytest.mark.slow + def test_plot_invalid_arg(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + msg = "'Line2D' object has no property 'blarg'" + with pytest.raises(AttributeError, match=msg): + df.plot.line(blarg=True) + + @pytest.mark.slow + def test_plot_tick_props(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"yticks": [1, 5, 10]}, + {"xticks": [1, 5, 10]}, + {"ylim": (-100, 100), "xlim": (-100, 100)}, + {"default_axes": True, "subplots": True, "title": "blah"}, + ], + ) + def test_plot_other_args(self, kwargs): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, **kwargs) + + @pytest.mark.slow + def test_plot_visible_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + # We have to redo it here because _check_plot_works does two plots, + # once without an ax kwarg and once with an ax kwarg and the new sharex + # behaviour does not remove the visibility of the latter axis (as ax is + # present). 
see: https://github.com/pandas-dev/pandas/issues/9737 + + axes = df.plot(subplots=True, title="blah") + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes[:2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible([ax.xaxis.get_label()], visible=False) + for ax in [axes[2]]: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible([ax.xaxis.get_label()]) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_title(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, title="blah") + + @pytest.mark.slow + def test_plot_multiindex(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_multiindex_unicode(self): + # unicode + index = MultiIndex.from_tuples( + [ + ("\u03b1", 0), + ("\u03b1", 1), + ("\u03b2", 2), + ("\u03b2", 3), + ("\u03b3", 4), + ("\u03b3", 5), + ("\u03b4", 6), + ("\u03b4", 7), + ], + names=["i0", "i1"], + ) + columns = MultiIndex.from_tuples( + [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"] + ) + df = DataFrame( + np.random.default_rng(2).integers(0, 10, (8, 2)), + columns=columns, + index=index, + ) + _check_plot_works(df.plot, title="\u03A3") + + @pytest.mark.slow + @pytest.mark.parametrize("layout", [None, (-1, 1)]) + def test_plot_single_column_bar(self, layout): + # GH 6951 + # Test with single column + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + axes = _check_plot_works(df.plot.bar, subplots=True, layout=layout) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_plot_passed_ax(self): + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + _, ax = mpl.pyplot.subplots() + axes = df.plot.bar(subplots=True, ax=ax) + assert len(axes) == 1 + result = ax.axes + assert result is axes[0] + + @pytest.mark.parametrize( + "cols, x, y", + [ + [list("ABCDE"), "A", "B"], + [["A", "B"], "A", "B"], + [["C", "A"], "C", "A"], + [["A", "C"], "A", "C"], + [["B", "C"], "B", "C"], + [["A", "D"], "A", "D"], + [["A", "E"], "A", "E"], + ], + ) + def test_nullable_int_plot(self, cols, x, y): + # GH 32073 + dates = ["2008", "2009", None, "2011", "2012"] + df = DataFrame( + { + "A": [1, 2, 3, 4, 5], + "B": [1, 2, 3, 4, 5], + "C": np.array([7, 5, np.nan, 3, 2], dtype=object), + "D": pd.to_datetime(dates, format="%Y").view("i8"), + "E": pd.to_datetime(dates, format="%Y", utc=True).view("i8"), + } + ) + + _check_plot_works(df[cols].plot, x=x, y=y) + + @pytest.mark.slow + @pytest.mark.parametrize("plot", ["line", "bar", "hist", "pie"]) + def test_integer_array_plot_series(self, plot): + # GH 25587 + arr = pd.array([1, 2, 3, 4], dtype="UInt32") + + s = Series(arr) + _check_plot_works(getattr(s.plot, plot)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "plot, kwargs", + [ + ["line", {}], + ["bar", {}], + ["hist", {}], + ["pie", {"y": "y"}], + ["scatter", {"x": "x", "y": "y"}], + ["hexbin", {"x": "x", "y": "y"}], + ], + ) + def test_integer_array_plot_df(self, plot, kwargs): + # GH 25587 + arr = pd.array([1, 2, 3, 4], 
dtype="UInt32") + df = DataFrame({"x": arr, "y": arr}) + _check_plot_works(getattr(df.plot, plot), **kwargs) + + def test_nonnumeric_exclude(self): + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}) + ax = df.plot() + assert len(ax.get_lines()) == 1 # B was plotted + + def test_implicit_label(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + ax = df.plot(x="a", y="b") + _check_text_labels(ax.xaxis.get_label(), "a") + + def test_donot_overwrite_index_name(self): + # GH 8494 + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), columns=["a", "b"] + ) + df.index.name = "NAME" + df.plot(y="b", label="LABEL") + assert df.index.name == "NAME" + + def test_plot_xy(self): + # columns.inferred_type == 'string' + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + _check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot()) + _check_data(df.plot(x=0), df.set_index("A").plot()) + _check_data(df.plot(y=0), df.B.plot()) + _check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot()) + _check_data(df.plot(x="A"), df.set_index("A").plot()) + _check_data(df.plot(y="B"), df.B.plot()) + + def test_plot_xy_int_cols(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # columns.inferred_type == 'integer' + df.columns = np.arange(1, len(df.columns) + 1) + _check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot()) + _check_data(df.plot(x=1), df.set_index(1).plot()) + _check_data(df.plot(y=1), df[1].plot()) + + def test_plot_xy_figsize_and_title(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # figsize and title + ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8)) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0)) + + # columns.inferred_type == 'mixed' + # TODO add MultiIndex test + + @pytest.mark.parametrize( + "input_log, expected_log", [(True, "log"), ("sym", "symlog")] + ) + def test_logscales(self, input_log, expected_log): + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + ax = df.plot(logy=input_log) + _check_ax_scales(ax, yaxis=expected_log) + assert ax.get_yscale() == expected_log + + ax = df.plot(logx=input_log) + _check_ax_scales(ax, xaxis=expected_log) + assert ax.get_xscale() == expected_log + + ax = df.plot(loglog=input_log) + _check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log) + assert ax.get_xscale() == expected_log + assert ax.get_yscale() == expected_log + + @pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"]) + def test_invalid_logscale(self, input_param): + # GH: 24867 + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + msg = f"keyword '{input_param}' should be bool, None, or 'sym', not 'sm'" + with pytest.raises(ValueError, match=msg): + df.plot(**{input_param: "sm"}) + + msg = f"PiePlot ignores the '{input_param}' keyword" + with tm.assert_produces_warning(UserWarning, match=msg): + df.plot.pie(subplots=True, **{input_param: True}) + + def test_xcompat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, 
freq="B"), + ) + ax = df.plot(x_compat=True) + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + plotting.plot_params["xaxis.compat"] = True + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params_x_compat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + plotting.plot_params["x_compat"] = False + + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + + def test_xcompat_plot_params_context_manager(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # useful if you're plotting a bunch together + with plotting.plot_params.use("x_compat", True): + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_period(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated " + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + _check_ticks_props(ax, xrot=0) + + def test_period_compat(self): + # GH 9012 + # period-array conversions + df = DataFrame( + np.random.default_rng(2).random((21, 2)), + index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)), + columns=["a", "b"], + ) + + df.plot() + mpl.pyplot.axhline(y=0) + + @pytest.mark.parametrize("index_dtype", [np.int64, np.float64]) + def test_unsorted_index(self, index_dtype): + df = DataFrame( + {"y": np.arange(100)}, + index=Index(np.arange(99, -1, -1), dtype=index_dtype), + dtype=np.int64, + ) + ax = df.plot() + lines = ax.get_lines()[0] + rs = lines.get_xydata() + rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y") + tm.assert_series_equal(rs, df.y, check_index_type=False) + + @pytest.mark.parametrize( + "df", + [ + DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0]), + DataFrame( + {"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]}, + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ), + ], + ) + def test_unsorted_index_lims(self, df): + ax = df.plot() + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def test_unsorted_index_lims_x_y(self): + df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]}) + ax = df.plot(x="z", y="y") + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def 
test_negative_log(self): + df = -DataFrame( + np.random.default_rng(2).random((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = "Log-y scales are not supported in area plot" + with pytest.raises(ValueError, match=msg): + df.plot.area(logy=True) + with pytest.raises(ValueError, match=msg): + df.plot.area(loglog=True) + + def _compare_stacked_y_coord(self, normal_lines, stacked_lines): + base = np.zeros(len(normal_lines[0].get_data()[1])) + for nl, sl in zip(normal_lines, stacked_lines): + base += nl.get_data()[1] # get y coordinates + sy = sl.get_data()[1] + tm.assert_numpy_array_equal(base, sy) + + @pytest.mark.parametrize("kind", ["line", "area"]) + @pytest.mark.parametrize("mult", [1, -1]) + def test_line_area_stacked(self, kind, mult): + df = mult * DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + + ax1 = _check_plot_works(df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(df.plot, kind=kind, stacked=True) + self._compare_stacked_y_coord(ax1.lines, ax2.lines) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_sep_df(self, kind): + # each column has either all positive or all negative values + sep_df = DataFrame( + { + "w": np.random.default_rng(2).random(6), + "x": np.random.default_rng(2).random(6), + "y": -np.random.default_rng(2).random(6), + "z": -np.random.default_rng(2).random(6), + } + ) + ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True) + self._compare_stacked_y_coord(ax1.lines[:2], ax2.lines[:2]) + self._compare_stacked_y_coord(ax1.lines[2:], ax2.lines[2:]) + + def test_line_area_stacked_mixed(self): + mixed_df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["w", "x", "y", "z"], + ) + _check_plot_works(mixed_df.plot, stacked=False) + + msg = ( + "When stacked is True, each column must be either all positive or " + "all negative. 
Column 'w' contains both positive and negative " + "values" + ) + with pytest.raises(ValueError, match=msg): + mixed_df.plot(stacked=True) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_positive_idx(self, kind): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + # Use an index with strictly positive values, preventing + # matplotlib from warning about ignoring xlim + df2 = df.set_index(df.index + 1) + _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + ax = _check_plot_works(df.plot) + masked1 = ax.lines[0].get_ydata() + masked2 = ax.lines[1].get_ydata() + # remove nan for comparison purpose + + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp) + + exp = np.array([3, 2, 1], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp) + tm.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False])) + tm.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False])) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df_stacked(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + @pytest.mark.parametrize("kwargs", [{}, {"stacked": False}]) + def test_line_area_nan_df_stacked_area(self, idx, kwargs): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot.area, **kwargs) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + if kwargs: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + else: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + ax = _check_plot_works(df.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_line_lim(self, kwargs): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + ax = df.plot(**kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + + def test_line_lim_subplots(self): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + axes = df.plot(secondary_y=True, subplots=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes: + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + xmin, xmax = ax.get_xlim() + lines = 
ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + + @pytest.mark.xfail( + strict=False, + reason="2020-12-01 this has been failing periodically on the " + "ymin==0 assertion for a week or so.", + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_area_lim(self, stacked): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["x", "y", "z", "four"] + ) + + neg_df = -df + + ax = _check_plot_works(df.plot.area, stacked=stacked) + xmin, xmax = ax.get_xlim() + ymin, ymax = ax.get_ylim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + assert ymin == 0 + + ax = _check_plot_works(neg_df.plot.area, stacked=stacked) + ymin, ymax = ax.get_ylim() + assert ymax == 0 + + def test_area_sharey_dont_overwrite(self): + # GH37942 + df = DataFrame(np.random.default_rng(2).random((4, 2)), columns=["x", "y"]) + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + df.plot(ax=ax1, kind="area") + df.plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_linewidth(self, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(stacked=stacked, linewidth=2) + for r in ax.patches: + assert r.get_linewidth() == 2 + + def test_bar_linewidth_subplots(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # subplots + axes = df.plot.bar(linewidth=2, subplots=True) + _check_axes_shape(axes, axes_num=5, layout=(5, 1)) + for ax in axes: + for r in ax.patches: + assert r.get_linewidth() == 2 + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_barwidth(self, meth, dim, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + ax = getattr(df.plot, meth)(stacked=stacked, width=width) + for r in ax.patches: + if not stacked: + assert getattr(r, dim)() == width / len(df.columns) + else: + assert getattr(r, dim)() == width + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + def test_barh_barwidth_subplots(self, meth, dim): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + axes = getattr(df.plot, meth)(width=width, subplots=True) + for ax in axes: + for r in ax.patches: + assert getattr(r, dim)() == width + + def test_bar_bottom_left_bottom(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.bar(stacked=False, bottom=1) + result = [p.get_y() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5]) + result = [p.get_y() for p in ax.patches[:5]] + assert result == [-1, -2, -3, -4, -5] + + def test_bar_bottom_left_left(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1])) + result = [p.get_x() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5]) + result = [p.get_x() for p in ax.patches[:5]] + assert result == [1, 2, 3, 4, 5] + + def test_bar_bottom_left_subplots(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + axes = df.plot.bar(subplots=True, bottom=-1) + for ax in axes: + result = [p.get_y() for p in ax.patches] + assert result == 
[-1] * 5 + + axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1])) + for ax in axes: + result = [p.get_x() for p in ax.patches] + assert result == [1] * 5 + + def test_bar_nan(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar() + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + def test_bar_nan_stacked(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar(stacked=True) + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + result = [p.get_y() for p in ax.patches] + expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0] + assert result == expected + + @pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex]) + def test_bar_categorical(self, idx): + # GH 13019 + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 5)), + index=idx(list("ABCDEF")), + columns=idx(list("abcde")), + ) + + ax = df.plot.bar() + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 5.15 + + ax = df.plot.bar(stacked=True) + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 4.75 + + @pytest.mark.parametrize("x, y", [("x", "y"), (1, 2)]) + def test_plot_scatter(self, x, y): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + def test_plot_scatter_error(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = re.escape("scatter() missing 1 required positional argument: 'y'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(x="x") + msg = re.escape("scatter() missing 1 required positional argument: 'x'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(y="y") + + def test_plot_scatter_shape(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + # GH 6951 + axes = df.plot(x="x", y="y", kind="scatter", subplots=True) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + def test_raise_error_on_datetime_time_data(self): + # GH 8113, datetime.time type is not supported by matplotlib in scatter + df = DataFrame(np.random.default_rng(2).standard_normal(10), columns=["a"]) + df["dtime"] = date_range(start="2014-01-01", freq="h", periods=10).time + msg = "must be a string or a (real )?number, not 'datetime.time'" + + with pytest.raises(TypeError, match=msg): + df.plot(kind="scatter", x="dtime", y="a") + + @pytest.mark.parametrize("x, y", [("dates", "vals"), (0, 1)]) + def test_scatterplot_datetime_data(self, x, y): + # GH 30391 + dates = date_range(start=date(2019, 1, 1), periods=12, freq="W") + vals = np.random.default_rng(2).normal(0, 1, len(dates)) + df = DataFrame({"dates": dates, "vals": vals}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + 
@pytest.mark.parametrize("x, y", [("a", "b"), (0, 1)]) + @pytest.mark.parametrize("b_col", [[2, 3, 4], ["a", "b", "c"]]) + def test_scatterplot_object_data(self, b_col, x, y, infer_string): + # GH 18755 + with option_context("future.infer_string", infer_string): + df = DataFrame({"a": ["A", "B", "C"], "b": b_col}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("ordered", [True, False]) + @pytest.mark.parametrize( + "categories", + (["setosa", "versicolor", "virginica"], ["versicolor", "virginica", "setosa"]), + ) + def test_scatterplot_color_by_categorical(self, ordered, categories): + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = pd.Categorical( + ["setosa", "setosa", "virginica", "virginica", "versicolor"], + ordered=ordered, + categories=categories, + ) + ax = df.plot.scatter(x=0, y=1, c="species") + (colorbar_collection,) = ax.collections + colorbar = colorbar_collection.colorbar + + expected_ticks = np.array([0.5, 1.5, 2.5]) + result_ticks = colorbar.get_ticks() + tm.assert_numpy_array_equal(result_ticks, expected_ticks) + + expected_boundaries = np.array([0.0, 1.0, 2.0, 3.0]) + result_boundaries = colorbar._boundaries + tm.assert_numpy_array_equal(result_boundaries, expected_boundaries) + + expected_yticklabels = categories + result_yticklabels = [i.get_text() for i in colorbar.ax.get_ymajorticklabels()] + assert all(i == j for i, j in zip(result_yticklabels, expected_yticklabels)) + + @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")]) + def test_plot_scatter_with_categorical_data(self, x, y): + # after fixing GH 18755, should be able to plot categorical data + df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("x, y, c", [("x", "y", "z"), (0, 1, 2)]) + def test_plot_scatter_with_c(self, x, y, c): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + ax = df.plot.scatter(x=x, y=y, c=c) + # default to Greys + assert ax.collections[0].cmap.name == "Greys" + + assert ax.collections[0].colorbar.ax.get_ylabel() == "z" + + def test_plot_scatter_with_c_props(self): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + cm = "cubehelix" + ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm) + assert ax.collections[0].cmap.name == cm + + # verify turning off colorbar works + ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False) + assert ax.collections[0].colorbar is None + + # verify that we can still plot a solid color + ax = df.plot.scatter(x=0, y=1, c="red") + assert ax.collections[0].colorbar is None + _check_colors(ax.collections, facecolors=["r"]) + + def test_plot_scatter_with_c_array(self): + # Ensure that we can pass an np.array straight through to matplotlib, + # this functionality was accidentally removed previously. 
+ # See https://github.com/pandas-dev/pandas/issues/8852 for bug report + # + # Exercise colormap path and non-colormap path as they are independent + # + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + red_rgba = [1.0, 0.0, 0.0, 1.0] + green_rgba = [0.0, 1.0, 0.0, 1.0] + rgba_array = np.array([red_rgba, green_rgba]) + ax = df.plot.scatter(x="A", y="B", c=rgba_array) + # expect the face colors of the points in the non-colormap path to be + # identical to the values we supplied, normally we'd be on shaky ground + # comparing floats for equality but here we expect them to be + # identical. + tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array) + # we don't test the colors of the faces in this next plot because they + # are dependent on the spring colormap, which may change its colors + # later. + float_array = np.array([0.0, 1.0]) + df.plot.scatter(x="A", y="B", c=float_array, cmap="spring") + + def test_plot_scatter_with_s(self): + # this refers to GH 32904 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + + ax = df.plot.scatter(x="a", y="b", s="c") + tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes()) + + def test_plot_scatter_with_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + norm = mpl.colors.LogNorm() + ax = df.plot.scatter(x="a", y="b", c="c", norm=norm) + assert ax.collections[0].norm is norm + + def test_plot_scatter_without_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + ax = df.plot.scatter(x="a", y="b", c="c") + plot_norm = ax.collections[0].norm + color_min_max = (df.c.min(), df.c.max()) + default_norm = mpl.colors.Normalize(*color_min_max) + for value in df.c: + assert plot_norm(value) == default_norm(value) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {}, + {"legend": False}, + {"default_axes": True, "subplots": True}, + {"stacked": True}, + ], + ) + def test_plot_bar(self, kwargs): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + + _check_plot_works(df.plot.bar, **kwargs) + + @pytest.mark.slow + def test_plot_bar_int_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 15)), + index=list(string.ascii_letters[:10]), + columns=range(15), + ) + _check_plot_works(df.plot.bar) + + @pytest.mark.slow + def test_plot_bar_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.bar) + _check_ticks_props(ax, xrot=90) + + ax = df.plot.bar(rot=35, fontsize=10) + _check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10) + + @pytest.mark.slow + def test_plot_barh_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.barh) + _check_ticks_props(ax, yrot=0) + + ax = df.plot.barh(rot=55, fontsize=11) + _check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11) + + def test_boxplot(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + ax = _check_plot_works(df.plot.box) + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal( + ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1) + ) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_series(self, hist_df): 
+ df = hist_df + series = df["height"] + axes = series.plot.box(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + _check_plot_works(series.plot.box) + + def test_boxplot_series_positions(self, hist_df): + df = hist_df + positions = np.array([1, 6, 7]) + ax = df.plot.box(positions=positions) + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_vertical(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + # if horizontal, yticklabels are rotated + ax = df.plot.box(rot=50, fontsize=8, vert=False) + _check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) + _check_text_labels(ax.get_yticklabels(), labels) + assert len(ax.lines) == 7 * len(numeric_cols) + + @pytest.mark.filterwarnings("ignore:Attempt:UserWarning") + def test_boxplot_vertical_subplots(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + axes = _check_plot_works( + df.plot.box, + default_axes=True, + subplots=True, + vert=False, + logx=True, + ) + _check_axes_shape(axes, axes_num=3, layout=(1, 3)) + _check_ax_scales(axes, xaxis="log") + for ax, label in zip(axes, labels): + _check_text_labels(ax.get_yticklabels(), [label]) + assert len(ax.lines) == 7 + + def test_boxplot_vertical_positions(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + positions = np.array([3, 2, 8]) + ax = df.plot.box(positions=positions, vert=False) + _check_text_labels(ax.get_yticklabels(), labels) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_return_type_invalid(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {None, 'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.plot.box(return_type="not_a_type") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_invalid_type(self, return_type): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + result = df.plot.box(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_kde_df(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + ax = _check_plot_works(df.plot, kind="kde") + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + _check_ticks_props(ax, xrot=0) + + def test_kde_df_rot(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + ax = df.plot(kind="kde", rot=20, fontsize=5) + _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5) + + def test_kde_df_subplots(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = _check_plot_works( + df.plot, + default_axes=True, + kind="kde", + subplots=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + def test_kde_df_logy(self): + 
pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = df.plot(kind="kde", logy=True, subplots=True) + _check_ax_scales(axes, yaxis="log") + + def test_kde_missing_vals(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4))) + df.loc[0, 0] = np.nan + _check_plot_works(df.plot, kind="kde") + + def test_hist_df(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + + ax = _check_plot_works(df.plot.hist) + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + + axes = _check_plot_works( + df.plot.hist, + default_axes=True, + subplots=True, + logy=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + _check_ax_scales(axes, yaxis="log") + + def test_hist_df_series(self): + series = Series(np.random.default_rng(2).random(10)) + axes = series.plot.hist(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + def test_hist_df_series_cumulative_density(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + def test_hist_df_series_cumulative(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4) + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + + tm.assert_almost_equal(rects[-2].get_height(), 10.0) + + def test_hist_df_orientation(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + # if horizontal, yticklabels are rotated + axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal") + _check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8) + + @pytest.mark.parametrize( + "weights", [0.1 * np.ones(shape=(100,)), 0.1 * np.ones(shape=(100, 2))] + ) + def test_hist_weights(self, weights): + # GH 33173 + + df = DataFrame( + dict(zip(["A", "B"], np.random.default_rng(2).standard_normal((2, 100)))) + ) + + ax1 = _check_plot_works(df.plot, kind="hist", weights=weights) + ax2 = _check_plot_works(df.plot, kind="hist") + + patch_height_with_weights = [patch.get_height() for patch in ax1.patches] + + # original heights with no weights, and we manually multiply with example + # weights, so after multiplication, they should be almost same + expected_patch_height = [0.1 * patch.get_height() for patch in ax2.patches] + + tm.assert_almost_equal(patch_height_with_weights, expected_patch_height) + + def _check_box_coord( + self, + patches, + expected_y=None, + expected_h=None, + expected_x=None, + expected_w=None, + ): + result_y = np.array([p.get_y() for p in patches]) + result_height = np.array([p.get_height() for p in patches]) + result_x = np.array([p.get_x() for p in patches]) + result_width = np.array([p.get_width() for p in patches]) + # dtype is depending on above values, no need to check + + if expected_y is not None: + tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False) + if expected_h is not None: + tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False) + if expected_x is not None: + tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False) + if expected_w is not None: + tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False) + + 
@pytest.mark.parametrize( + "data", + [ + { + "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])), + "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])), + "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])), + }, + { + "A": np.repeat( + np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6]) + ), + "B": np.repeat( + np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8]) + ), + "C": np.repeat( + np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10]) + ), + }, + ], + ) + def test_hist_df_coord(self, data): + df = DataFrame(data) + + ax = df.plot.hist(bins=5) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([10, 9, 8, 7, 6]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([18, 17, 16, 15, 14]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist(bins=5, stacked=True, subplots=True) + self._check_box_coord( + axes[0].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + # horizontal + ax = df.plot.hist(bins=5, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_x=np.array([10, 9, 8, 7, 6]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([18, 17, 16, 15, 14]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist( + bins=5, stacked=True, subplots=True, orientation="horizontal" + ) + self._check_box_coord( + axes[0].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + def test_plot_int_columns(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))).cumsum() + _check_plot_works(df.plot, legend=True) + + @pytest.mark.parametrize( + "markers", + [ + {0: "^", 1: "+", 2: "o"}, + {0: "^", 
1: "+"}, + ["^", "+", "o"], + ["^", "+"], + ], + ) + def test_style_by_column(self, markers): + import matplotlib.pyplot as plt + + fig = plt.gcf() + fig.clf() + fig.add_subplot(111) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 3))) + ax = df.plot(style=markers) + for idx, line in enumerate(ax.get_lines()[: len(markers)]): + assert line.get_marker() == markers[idx] + + def test_line_label_none(self): + s = Series([1, 2]) + ax = s.plot() + assert ax.get_legend() is None + + ax = s.plot(legend=True) + assert ax.get_legend().get_texts()[0].get_text() == "" + + @pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd_plot_box(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(100) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.plot.box(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + def test_unordered_ts(self): + # GH#2609, GH#55906 + index = [date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)] + values = [3.0, 2.0, 1.0] + df = DataFrame( + np.array(values), + index=index, + columns=["test"], + ) + ax = df.plot() + xticks = ax.lines[0].get_xdata() + tm.assert_numpy_array_equal(xticks, np.array(index, dtype=object)) + ydata = ax.lines[0].get_ydata() + tm.assert_numpy_array_equal(ydata, np.array(values)) + + # even though we don't sort the data before passing it to matplotlib, + # the ticks are sorted + xticks = ax.xaxis.get_ticklabels() + xlocs = [x.get_position()[0] for x in xticks] + assert Index(xlocs).is_monotonic_increasing + xlabels = [x.get_text() for x in xticks] + assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_kind_both_ways(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot(kind=kind) + getattr(df.plot, kind)() + + @pytest.mark.parametrize("kind", ["scatter", "hexbin"]) + def test_kind_both_ways_x_y(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot("x", "x", kind=kind) + getattr(df.plot, kind)("x", "x") + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_all_invalid_plot_data(self, kind): + df = DataFrame(list("abcd")) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + @pytest.mark.parametrize( + "kind", list(plotting.PlotAccessor._common_kinds) + ["area"] + ) + def test_partially_invalid_plot_data_numeric(self, kind): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + dtype=object, + ) + df[np.random.default_rng(2).random(df.shape[0]) > 0.5] = "a" + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + def test_invalid_kind(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + msg = "invalid_plot_kind is not a valid plot kind" + with pytest.raises(ValueError, match=msg): + df.plot(kind="invalid_plot_kind") + + @pytest.mark.parametrize( + "x,y,lbl", + [ + (["B", "C"], "A", "a"), + (["A"], ["B", "C"], ["b", "c"]), + ], + ) + def test_invalid_xy_args(self, x, y, lbl): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y, 
label=lbl) + + def test_bad_label(self): + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + msg = "label should be list-like and same length as y" + with pytest.raises(ValueError, match=msg): + df.plot(x="A", y=["B", "C"], label="bad_label") + + @pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")]) + def test_invalid_xy_args_dup_cols(self, x, y): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB")) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y) + + @pytest.mark.parametrize( + "x,y,lbl,colors", + [ + ("A", ["B"], ["b"], ["red"]), + ("A", ["B", "C"], ["b", "c"], ["red", "blue"]), + (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]), + ], + ) + def test_y_listlike(self, x, y, lbl, colors): + # GH 19699: tests list-like y and verifies lbls & colors + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + _check_plot_works(df.plot, x="A", y=y, label=lbl) + + ax = df.plot(x=x, y=y, label=lbl, color=colors) + assert len(ax.lines) == len(y) + _check_colors(ax.get_lines(), linecolors=colors) + + @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])]) + def test_xy_args_integer(self, x, y, colnames): + # GH 20056: tests integer args for xy and checks col names + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + df.columns = colnames + _check_plot_works(df.plot, x=x, y=y) + + def test_hexbin_basic(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", gridsize=10) + # TODO: need better way to test. This just does existence. + assert len(ax.collections) == 1 + + def test_hexbin_basic_subplots(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + # GH 6951 + axes = df.plot.hexbin(x="A", y="B", subplots=True) + # hexbin should have 2 axes in the figure, 1 for plotting and another + # is colorbar + assert len(axes[0].figure.axes) == 2 + # return value is single axes + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize("reduce_C", [None, np.std]) + def test_hexbin_with_c(self, reduce_C): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=reduce_C) + assert len(ax.collections) == 1 + + @pytest.mark.parametrize( + "kwargs, expected", + [ + ({}, "BuGn"), # default cmap + ({"colormap": "cubehelix"}, "cubehelix"), + ({"cmap": "YlGn"}, "YlGn"), + ], + ) + def test_hexbin_cmap(self, kwargs, expected): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", **kwargs) + assert ax.collections[0].cmap.name == expected + + def test_pie_df_err(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + msg = "pie requires either y column or 'subplots=True'" + with pytest.raises(ValueError, match=msg): + df.plot.pie() + + @pytest.mark.parametrize("y", ["Y", 2]) + 
def test_pie_df(self, y): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + ax = _check_plot_works(df.plot.pie, y=y) + _check_text_labels(ax.texts, df.index) + + def test_pie_df_subplots(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + ) + assert len(axes) == len(df.columns) + for ax in axes: + _check_text_labels(ax.texts, df.index) + for ax, ylabel in zip(axes, df.columns): + assert ax.get_ylabel() == ylabel + + def test_pie_df_labels_colors(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + labels=labels, + colors=color_args, + ) + assert len(axes) == len(df.columns) + + for ax in axes: + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_df_nan(self): + df = DataFrame(np.random.default_rng(2).random((4, 4))) + for i in range(4): + df.iloc[i, i] = np.nan + _, axes = mpl.pyplot.subplots(ncols=4) + + # GH 37668 + kwargs = {"normalize": True} + + with tm.assert_produces_warning(None): + df.plot.pie(subplots=True, ax=axes, legend=True, **kwargs) + + base_expected = ["0", "1", "2", "3"] + for i, ax in enumerate(axes): + expected = list(base_expected) # force copy + expected[i] = "" + result = [x.get_text() for x in ax.texts] + assert result == expected + + # legend labels + # NaN's not included in legend with subplots + # see https://github.com/pandas-dev/pandas/issues/8390 + result_labels = [x.get_text() for x in ax.get_legend().get_texts()] + expected_labels = base_expected[:i] + base_expected[i + 1 :] + assert result_labels == expected_labels + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"logy": True}, + {"logx": True, "logy": True}, + {"loglog": True}, + ], + ) + def test_errorbar_plot(self, kwargs): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + + # check line plots + ax = _check_plot_works(df.plot, yerr=df_err, **kwargs) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_bar(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + ax = _check_plot_works( + (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True + ) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_yerr_array(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + # yerr is raw error values + ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("yerr", ["yerr", "誤差"]) + def test_errorbar_plot_column_name(self, yerr): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df[yerr] = np.ones(12) * 0.2 + + ax = _check_plot_works(df.plot, yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = 
_check_plot_works(df.plot, y="y", x="x", yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_external_valueerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + with tm.external_error_raised(ValueError): + df.plot(yerr=np.random.default_rng(2).standard_normal(11)) + + @pytest.mark.slow + def test_errorbar_plot_external_typeerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12}) + with tm.external_error_raised(TypeError): + df.plot(yerr=df_err) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err", + [ + Series(np.ones(12) * 0.2, name="x"), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ], + ) + def test_errorbar_plot_different_yerr(self, kind, y_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + + ax = _check_plot_works(df.plot, yerr=y_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err, x_err", + [ + ( + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ), + (Series(np.ones(12) * 0.2, name="x"), Series(np.ones(12) * 0.2, name="x")), + (0.2, 0.2), + ], + ) + def test_errorbar_plot_different_yerr_xerr(self, kind, y_err, x_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + ax = _check_plot_works(df.plot, yerr=y_err, xerr=x_err, kind=kind) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_plot_different_yerr_xerr_subplots(self, kind): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + df_err = DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}) + axes = _check_plot_works( + df.plot, + default_axes=True, + yerr=df_err, + xerr=df_err, + subplots=True, + kind=kind, + ) + _check_has_errorbars(axes, xerr=1, yerr=1) + + @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError) + def test_errorbar_plot_iterator(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + + # yerr is iterator + ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df))) + _check_has_errorbars(ax, xerr=0, yerr=2) + + def test_errorbar_with_integer_column_names(self): + # test with integer column names + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + df_err = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + ax = _check_plot_works(df.plot, yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, y=0, yerr=1) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar"]) + def test_errorbar_with_partial_columns_kind(self, kind): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2] + ) + ax = _check_plot_works(df.plot, yerr=df_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_with_partial_columns_dti(self): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 
2))), columns=[0, 2] + ) + ix = date_range("1/1/2000", periods=10, freq="ME") + df.set_index(ix, inplace=True) + df_err.set_index(ix, inplace=True) + ax = _check_plot_works(df.plot, yerr=df_err, kind="line") + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("err_box", [lambda x: x, DataFrame]) + def test_errorbar_with_partial_columns_box(self, err_box): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + err = err_box({"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}) + ax = _check_plot_works(df.plot, yerr=err) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_timeseries(self, kind): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + + # check time-series plots + ix = date_range("1/1/2000", "1/1/2001", freq="ME") + tdf = DataFrame(d, index=ix) + tdf_err = DataFrame(d_err, index=ix) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + axes = _check_plot_works( + tdf.plot, + default_axes=True, + kind=kind, + yerr=tdf_err, + subplots=True, + ) + _check_has_errorbars(axes, xerr=0, yerr=1) + + def test_errorbar_asymmetrical(self): + err = np.random.default_rng(2).random((3, 2, 5)) + + # each column is [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]
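+ # Convention sketch for the assertion below (assuming matplotlib's + # lower/upper layout): err[i, 0, :] are the lower and err[i, 1, :] the + # upper errors for column i, so a (3, 2, 5) array fits this 3-column, + # 5-row frame.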
+ df = DataFrame(np.arange(15).reshape(3, 5)).T + + ax = df.plot(yerr=err, xerr=err / 2) + + yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1] + expected_0_0 = err[0, :, 0] * np.array([-1, 1]) + tm.assert_almost_equal(yerr_0_0, expected_0_0) + + msg = re.escape( + "Asymmetrical error bars should be provided with the shape (3, 2, 5)" + ) + with pytest.raises(ValueError, match=msg): + df.plot(yerr=err.T) + + def test_table(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, table=True) + _check_plot_works(df.plot, table=df) + + # GH 35945 UserWarning + with tm.assert_produces_warning(None): + ax = df.plot() + assert len(ax.tables) == 0 + plotting.table(ax, df.T) + assert len(ax.tables) == 1 + + def test_errorbar_scatter(self): + df = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))), + index=range(5), + columns=["x", "y"], + ) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))) / 5, + index=range(5), + columns=["x", "y"], + ) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y") + _check_has_errorbars(ax, xerr=0, yerr=0) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=0) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=1) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=1) + + def test_errorbar_scatter_color(self): + def _check_errorbar_color(containers, expected, has_err="has_xerr"): + lines = [] + errs = next(c.lines for c in ax.containers if getattr(c, has_err, False)) + for el in errs: + if is_list_like(el): + lines.extend(el) + else: + lines.append(el) + err_lines = [x for x in lines if x in ax.collections] + _check_colors(err_lines, linecolors=np.array([expected] * len(err_lines))) + + # GH 8081 + df = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 5))), + columns=["a", "b", "c", "d", "e"], + ) + ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red") + _check_has_errorbars(ax, xerr=1, yerr=1) + _check_errorbar_color(ax.containers, "red", has_err="has_xerr") + _check_errorbar_color(ax.containers, "red", has_err="has_yerr") + + ax = df.plot.scatter(x="a", y="b", yerr="e", color="green") + _check_has_errorbars(ax, xerr=0, yerr=1) + _check_errorbar_color(ax.containers, "green", has_err="has_yerr") + + def test_scatter_unknown_colormap(self): + # GH#48726 + df = DataFrame({"a": [1, 2, 3], "b": 4}) + with pytest.raises((ValueError, KeyError), match="'unknown' is not a"): + df.plot(x="a", y="b", colormap="unknown", kind="scatter") + + def test_sharex_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axes in fig.get_axes() are sorted differently than pandas + # expects them to be, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + plt.close("all") + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[0], axes[2]]: + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[1], axes[3]]: +
_check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharex=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + _check(axes) + + def test_sharex_false_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axes in fig.get_axes() are sorted differently than pandas + # expects them to be, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharex, no labels should be touched! + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_sharey_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axes in fig.get_axes() are sorted differently than pandas + # expects them to be, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + for ax in [axes[0], axes[1]]: + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[2], axes[3]]: + _check_visible(ax.get_yticklabels(), visible=False) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharey=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharey=True) + + gs.tight_layout(plt.gcf()) + _check(axes) + + def test_sharey_and_ax_tight(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharey, no labels should be touched!
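+ # No axis sharing is requested here, so each gridspec Axes keeps all of + # its own x- and y-tick labels, as the loop below asserts.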
+ for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) + def test_memory_leak(self, kind): + """Check that every plot type gets properly collected.""" + pytest.importorskip("scipy") + args = {} + if kind in ["hexbin", "scatter", "pie"]: + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + args = {"x": "A", "y": "B"} + elif kind == "area": + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).abs() + else: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + # Use a weakref so we can see if the object gets collected without + # also preventing it from being collected + ref = weakref.ref(df.plot(kind=kind, **args)) + + # have matplotlib delete all the figures + plt.close("all") + # force a garbage collection + gc.collect() + assert ref() is None + + def test_df_gridspec_patterns_vert_horiz(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=ts.index, + columns=list("AB"), + ) + + def _get_vertical_grid(): + gs = gridspec.GridSpec(3, 1) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :]) + ax2 = fig.add_subplot(gs[2, :]) + return ax1, ax2 + + def _get_horizontal_grid(): + gs = gridspec.GridSpec(1, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:, :2]) + ax2 = fig.add_subplot(gs[:, 2]) + return ax1, ax2 + + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + ax1 = ts.plot(ax=ax1) + assert len(ax1.lines) == 1 + ax2 = df.plot(ax=ax2) + assert len(ax2.lines) == 2 + for ax in [ax1, ax2]: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots=True + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + axes = df.plot(subplots=True, ax=[ax1, ax2]) + assert len(ax1.lines) == 1 + assert len(ax2.lines) == 1 + for ax in axes: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # vertical / subplots / sharex=True / sharey=True + ax1, ax2 = _get_vertical_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + for ax in [ax1, ax2]: + # yaxis are visible because there is only one column + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of axes0 (top) are hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + 
_check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + plt.close("all") + + # horizontal / subplots / sharex=True / sharey=True + ax1, ax2 = _get_horizontal_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + _check_visible(axes[0].get_yticklabels(), visible=True) + # yaxis of axes1 (right) are hidden + _check_visible(axes[1].get_yticklabels(), visible=False) + for ax in [ax1, ax2]: + # xaxis are visible because there is only one column + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_gridspec_patterns_boxed(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + # boxed + def _get_boxed_grid(): + gs = gridspec.GridSpec(3, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :2]) + ax2 = fig.add_subplot(gs[:2, 2]) + ax3 = fig.add_subplot(gs[2, :2]) + ax4 = fig.add_subplot(gs[2, 2]) + return ax1, ax2, ax3, ax4 + + axes = _get_boxed_grid() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=ts.index, + columns=list("ABCD"), + ) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + # axis are visible because these are not shared + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots / sharex=True / sharey=True + axes = _get_boxed_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True) + for ax in axes: + assert len(ax.lines) == 1 + for ax in [axes[0], axes[2]]: # left column + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[1], axes[3]]: # right column + _check_visible(ax.get_yticklabels(), visible=False) + for ax in [axes[0], axes[1]]: # top row + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[2], axes[3]]: # bottom row + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + _check_grid_settings( + DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}), + plotting.PlotAccessor._dataframe_kinds, + kws={"x": "a", "y": "b"}, + ) + + def test_plain_axes(self): + # supplied ax itself is a SubplotAxes, but figure contains also + # a plain Axes object (GH11556) + fig, ax = mpl.pyplot.subplots() + fig.add_axes([0.2, 0.2, 0.2, 0.2]) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + + def test_plain_axes_df(self): + # supplied ax itself is a plain Axes, but because the cmap keyword + # a new ax is created for the colorbar -> also multiples axes (GH11520) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(8), + "b": np.random.default_rng(2).standard_normal(8), + } + ) + fig = mpl.pyplot.figure() + ax = fig.add_axes((0, 0, 1, 1)) + df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv") + + def test_plain_axes_make_axes_locatable(self): + # other examples + 
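+ # make_axes_locatable carves a child Axes out of the parent (here a 5%-wide + # strip on the right, where a colorbar would usually sit); plotting into + # both the parent and the appended Axes should succeed.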
fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1 import make_axes_locatable + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=cax) + + def test_plain_axes_make_inset_axes(self): + fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + iax = inset_axes(ax, width="30%", height=1.0, loc=3) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=iax) + + @pytest.mark.parametrize("method", ["line", "barh", "bar"]) + def test_secondary_axis_font_size(self, method): + # GH: 12565 + df = ( + DataFrame( + np.random.default_rng(2).standard_normal((15, 2)), columns=list("AB") + ) + .assign(C=lambda df: df.B.cumsum()) + .assign(D=lambda df: df.C * 1.1) + ) + + fontsize = 20 + sy = ["C", "D"] + + kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True} + ax = getattr(df.plot, method)(**kwargs) + _check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize) + + def test_x_string_values_ticks(self): + # Test that a string plot index has fixed xtick positions + # GH: 7612, GH: 22334 + df = DataFrame( + { + "sales": [3, 2, 3], + "visits": [20, 42, 28], + "day": ["Monday", "Tuesday", "Wednesday"], + } + ) + ax = df.plot.area(x="day") + ax.set_xlim(-1, 3) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Check that each label stayed at the right position + assert labels_position["Monday"] == 0.0 + assert labels_position["Tuesday"] == 1.0 + assert labels_position["Wednesday"] == 2.0 + + def test_x_multiindex_values_ticks(self): + # Test that a multiindex plot index has fixed xtick positions + # GH: 15912 + index = MultiIndex.from_product([[2012, 2013], [1, 2]]) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + columns=["A", "B"], + index=index, + ) + ax = df.plot() + ax.set_xlim(-1, 4) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Check that each label stayed at the right position + assert labels_position["(2012, 1)"] == 0.0 + assert labels_position["(2012, 2)"] == 1.0 + assert labels_position["(2013, 1)"] == 2.0 + assert labels_position["(2013, 2)"] == 3.0 + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_xlim_plot_line(self, kind): + # test if xlim is set correctly in plot.line and plot.area + # GH 27686 + df = DataFrame([2, 4], index=[1, 2]) + ax = df.plot(kind=kind) + xlims = ax.get_xlim() + assert xlims[0] < 1 + assert xlims[1] > 2 + + def test_xlim_plot_line_correctly_in_mixed_plot_type(self): + # test if xlim is set correctly when ax contains multiple different kinds + # of plots, GH 27686 + fig, ax = mpl.pyplot.subplots() + + indexes = ["k1", "k2", "k3", "k4"] + df = DataFrame( + { + "s1": [1000, 2000, 1500, 2000], + "s2": [900, 1400, 2000, 3000], + "s3": [1500, 1500, 1600, 1200], + "secondary_y": [1, 3, 4, 3], + }, + index=indexes, + ) + df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False) + df[["secondary_y"]].plot(ax=ax, secondary_y=True) + + xlims = ax.get_xlim() + assert xlims[0] < 0 + assert xlims[1] > 3 + + # make sure axis labels are plotted correctly as well + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + assert xticklabels == indexes + + def test_plot_no_rows(self): + # GH 27758 + df = DataFrame(columns=["foo"],
dtype=int) + assert df.empty + ax = df.plot() + assert len(ax.get_lines()) == 1 + line = ax.get_lines()[0] + assert len(line.get_xdata()) == 0 + assert len(line.get_ydata()) == 0 + + def test_plot_no_numeric_data(self): + df = DataFrame(["a", "b", "c"]) + with pytest.raises(TypeError, match="no numeric data to plot"): + df.plot() + + @pytest.mark.parametrize( + "kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie") + ) + def test_group_subplot(self, kind): + pytest.importorskip("scipy") + d = { + "a": np.arange(10), + "b": np.arange(10) + 1, + "c": np.arange(10) + 1, + "d": np.arange(10), + "e": np.arange(10), + } + df = DataFrame(d) + + axes = df.plot(subplots=[("b", "e"), ("c", "d")], kind=kind) + assert len(axes) == 3 # 2 groups + single column a + + expected_labels = (["b", "e"], ["c", "d"], ["a"]) + for ax, labels in zip(axes, expected_labels): + if kind != "pie": + _check_legend_labels(ax, labels=labels) + if kind == "line": + assert len(ax.lines) == len(labels) + + def test_group_subplot_series_notimplemented(self): + ser = Series(range(1)) + msg = "An iterable subplots for a Series" + with pytest.raises(NotImplementedError, match=msg): + ser.plot(subplots=[("a",)]) + + def test_group_subplot_multiindex_notimplemented(self): + df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0, 1), (1, 2)])) + msg = "An iterable subplots for a DataFrame with a MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[(0, 1)]) + + def test_group_subplot_nonunique_cols_notimplemented(self): + df = DataFrame(np.eye(2), columns=["a", "a"]) + msg = "An iterable subplots for a DataFrame with non-unique" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[("a",)]) + + @pytest.mark.parametrize( + "subplots, expected_msg", + [ + (123, "subplots should be a bool or an iterable"), + ("a", "each entry should be a list/tuple"), # iterable of non-iterable + ((1,), "each entry should be a list/tuple"), # iterable of non-iterable + (("a",), "each entry should be a list/tuple"), # iterable of strings + ], + ) + def test_group_subplot_bad_input(self, subplots, expected_msg): + # Make sure error is raised when subplots is not a properly + # formatted iterable. Only iterables of iterables are permitted, and + # entries should not be strings. 
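+ # A well-formed value is an iterable of column-label tuples, e.g. + # df.plot(subplots=[("a", "b")]) to draw columns a and b on a shared Axes.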
+ d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=expected_msg): + df.plot(subplots=subplots) + + def test_group_subplot_invalid_column_name(self): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=r"Column label\(s\) \['bad_name'\]"): + df.plot(subplots=[("a", "bad_name")]) + + def test_group_subplot_duplicated_column(self): + d = {"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match="should be in only one subplot"): + df.plot(subplots=[("a", "b"), ("a", "c")]) + + @pytest.mark.parametrize("kind", ("box", "scatter", "hexbin")) + def test_group_subplot_invalid_kind(self, kind): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + with pytest.raises( + ValueError, match="When subplots is an iterable, kind must be one of" + ): + df.plot(subplots=[("a", "b")], kind=kind) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_single_plot( + self, kind, index_name, old_label, new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name + + # default is the ylabel is not shown and xlabel is index name + ax = df.plot(kind=kind) + assert ax.get_xlabel() == old_label + assert ax.get_ylabel() == "" + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + ax = df.plot(kind=kind, ylabel=new_label, xlabel=new_label) + assert ax.get_ylabel() == str(new_label) + assert ax.get_xlabel() == str(new_label) + + @pytest.mark.parametrize( + "xlabel, ylabel", + [ + (None, None), + ("X Label", None), + (None, "Y Label"), + ("X Label", "Y Label"), + ], + ) + @pytest.mark.parametrize("kind", ["scatter", "hexbin"]) + def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel): + # GH 37001 + xcol = "Type A" + ycol = "Type B" + df = DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol]) + + # default is the labels are column names + ax = df.plot(kind=kind, x=xcol, y=ycol, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == (xcol if xlabel is None else xlabel) + assert ax.get_ylabel() == (ycol if ylabel is None else ylabel) + + @pytest.mark.parametrize("secondary_y", (False, True)) + def test_secondary_y(self, secondary_y): + ax_df = DataFrame([0]).plot( + secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99] + ) + for ax in ax_df.figure.axes: + if ax.yaxis.get_visible(): + assert ax.get_ylabel() == "Y" + assert ax.get_ylim() == (0, 100) + assert ax.get_yticks()[0] == 99 + + @pytest.mark.slow + def test_plot_no_warning(self): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + with tm.assert_produces_warning(False): + _ = df.plot() + _ = df.T.plot() + + +def _generate_4_axes_via_gridspec(): + import matplotlib.pyplot as plt + + gs = mpl.gridspec.GridSpec(2, 2) + ax_tl = plt.subplot(gs[0, 0]) + ax_ll = plt.subplot(gs[1, 0]) + ax_tr = plt.subplot(gs[0, 1]) + ax_lr = plt.subplot(gs[1, 1]) + + return gs, [ax_tl, ax_ll, ax_tr, ax_lr] diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1edd323ef280cef5e7e79aa809906434a86407 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py @@ -0,0 +1,670 @@ +""" Test cases for DataFrame.plot """ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_plot_works, + _unpack_cycler, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): + if fliers_c is None: + fliers_c = "k" + _check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"])) + _check_colors(bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])) + _check_colors(bp["medians"], linecolors=[medians_c] * len(bp["medians"])) + _check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"])) + _check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"])) + + +class TestDataFrameColor: + @pytest.mark.parametrize( + "color", ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"] + ) + def test_mpl2_color_cycle_str(self, color): + # GH 15516 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + _check_plot_works(df.plot, color=color) + + def test_color_single_series_list(self): + # GH 3486 + df = DataFrame({"A": [1, 2, 3]}) + _check_plot_works(df.plot, color=["red"]) + + @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)]) + def test_rgb_tuple_color(self, color): + # GH 16695 + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + _check_plot_works(df.plot, x="x", y="y", color=color) + + def test_color_empty_string(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + with pytest.raises(ValueError, match="Invalid color argument:"): + df.plot(color="") + + def test_color_and_style_arguments(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + # passing both 'color' and 'style' arguments should be allowed + # if there is no color symbol in the style strings: + ax = df.plot(color=["red", "black"], style=["-", "--"]) + # check that the linestyles are correctly set: + linestyle = [line.get_linestyle() for line in ax.lines] + assert linestyle == ["-", "--"] + # check that the colors are correctly set: + color = [line.get_color() for line in ax.lines] + assert color == ["red", "black"] + # passing both 'color' and 'style' arguments should not be allowed + # if there is a color symbol in the style strings: + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. 
Please use one or the other or pass 'style' without a color " + "symbol" + ) + with pytest.raises(ValueError, match=msg): + df.plot(color=["red", "black"], style=["k-", "r--"]) + + @pytest.mark.parametrize( + "color, expected", + [ + ("green", ["green"] * 4), + (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]), + ], + ) + def test_color_and_marker(self, color, expected): + # GH 21003 + df = DataFrame(np.random.default_rng(2).random((7, 4))) + ax = df.plot(color=color, style="d--") + # check colors + result = [i.get_color() for i in ax.lines] + assert result == expected + # check markers and linestyles + assert all(i.get_linestyle() == "--" for i in ax.lines) + assert all(i.get_marker() == "d" for i in ax.lines) + + def test_bar_colors(self): + default_colors = _unpack_cycler(plt.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar() + _check_colors(ax.patches[::5], facecolors=default_colors[:5]) + + def test_bar_colors_custom(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(color=custom_colors) + _check_colors(ax.patches[::5], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_bar_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + _check_colors(ax.patches[::5], facecolors=rgba_colors) + + def test_bar_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.bar(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_bar_colors_green(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="bar", color="green") + _check_colors(ax.patches[::5], facecolors=["green"] * 5) + + def test_bar_user_colors(self): + df = DataFrame( + {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]} + ) + # This should *only* work when `y` is specified, else + # we use one color per column + ax = df.plot.bar(y="A", color=df["color"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_if_scatterplot_colorbar_affects_xaxis_visibility(self): + # addressing issue #10611, to ensure colorbar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax1 = df.plot.scatter(x="A label", y="B label") + ax2 = df.plot.scatter(x="A label", y="B label", c="C label") + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()] + assert vis1 == vis2 + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()] + assert vis1 == vis2 + + assert ( + ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible() + ) + + def test_if_hexbin_xaxis_label_is_visible(self): + # addressing issue #10678, to ensure colorbar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend.
+ random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax = df.plot.hexbin("A label", "B label", gridsize=12) + assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels()) + assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels()) + assert ax.xaxis.get_label().get_visible() + + def test_if_scatterplot_colorbars_are_next_to_parent_axes(self): + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + fig, axes = plt.subplots(1, 2) + df.plot.scatter("A label", "B label", c="C label", ax=axes[0]) + df.plot.scatter("A label", "B label", c="C label", ax=axes[1]) + plt.tight_layout() + + points = np.array([ax.get_position().get_points() for ax in fig.axes]) + axes_x_coords = points[:, :, 0] + parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :] + colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :] + assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all() + + @pytest.mark.parametrize("cmap", [None, "Greys"]) + def test_scatter_with_c_column_name_with_colors(self, cmap): + # https://github.com/pandas-dev/pandas/issues/34316 + + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = ["r", "r", "g", "g", "b"] + if cmap is not None: + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + ax = df.plot.scatter(x=0, y=1, cmap=cmap, c="species") + else: + ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap) + assert ax.collections[0].colorbar is None + + def test_scatter_colors(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + with pytest.raises(TypeError, match="Specify exactly one of `c` and `color`"): + df.plot.scatter(x="a", y="b", c="c", color="green") + + def test_scatter_colors_not_raising_warnings(self): + # GH-53908. Do not raise UserWarning: No data for colormapping + # provided via 'c'. 
Parameters 'cmap' will be ignored + df = DataFrame({"x": [1, 2, 3], "y": [1, 2, 3]}) + with tm.assert_produces_warning(None): + df.plot.scatter(x="x", y="y", c="b") + + def test_scatter_colors_default(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + ax = df.plot.scatter(x="a", y="b", c="c") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array(mpl.colors.ColorConverter.to_rgba(default_colors[0])), + ) + + def test_scatter_colors_white(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + ax = df.plot.scatter(x="a", y="b", color="white") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array([1, 1, 1, 1], dtype=np.float64), + ) + + def test_scatter_colorbar_different_cmap(self): + # GH 33389 + df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]}) + df["x2"] = df["x"] + 1 + + _, ax = plt.subplots() + df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax) + df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax) + + assert ax.collections[0].cmap.name == "cividis" + assert ax.collections[1].cmap.name == "magma" + + def test_line_colors(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + plt.close("all") + + ax2 = df.plot(color=custom_colors) + lines2 = ax2.get_lines() + + for l1, l2 in zip(ax.get_lines(), lines2): + assert l1.get_color() == l2.get_color() + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_line_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_line_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + ax = df.loc[:, [0]].plot(color="DodgerBlue") + _check_colors(ax.lines, linecolors=["DodgerBlue"]) + + def test_line_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(color="red") + _check_colors(ax.get_lines(), linecolors=["red"] * 5) + + def test_line_colors_hex(self): + # GH 10299 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + def test_dont_modify_colors(self): + colors = ["r", "g", "b"] + DataFrame(np.random.default_rng(2).random((10, 2))).plot(color=colors) + assert len(colors) == 3 + + def test_line_colors_and_styles_subplots(self): + # GH 9894 + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + axes = df.plot(subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("color", ["k", "green"]) + def test_line_colors_and_styles_subplots_single_color_str(self, color): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(subplots=True, color=color) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[color]) + + @pytest.mark.parametrize("color", 
["rgcby", list("rgcby")]) + def test_line_colors_and_styles_subplots_custom_colors(self, color): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(color=color, subplots=True) + for ax, c in zip(axes, list(color)): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_colormap_hex(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # GH 10299 + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + axes = df.plot(color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("cmap", ["jet", cm.jet]) + def test_line_colors_and_styles_subplots_colormap_subplot(self, cmap): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(colormap=cmap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_single_col(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def test_line_colors_and_styles_subplots_single_char(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # single character style + axes = df.plot(style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_line_colors_and_styles_subplots_list_styles(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_area_colors(self): + from matplotlib.collections import PolyCollection + + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.area(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=custom_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=custom_colors) + + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_poly(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.area(colormap="jet") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=jet_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=jet_colors) + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_stacked_false(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + # When stacked=False, alpha is set to 0.5 + ax = df.plot.area(colormap=cm.jet, stacked=False) + 
_check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors] + _check_colors(poly, facecolors=jet_with_alpha) + + handles, _ = ax.get_legend_handles_labels() + linecolors = jet_with_alpha + _check_colors(handles[: len(jet_colors)], linecolors=linecolors) + for h in handles: + assert h.get_alpha() == 0.5 + + def test_hist_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist() + _check_colors(ax.patches[::10], facecolors=default_colors[:5]) + + def test_hist_colors_single_custom(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + ax = df.plot.hist(color=custom_colors) + _check_colors(ax.patches[::10], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_hist_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + _check_colors(ax.patches[::10], facecolors=rgba_colors) + + def test_hist_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.hist(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_hist_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="hist", color="green") + _check_colors(ax.patches[::10], facecolors=["green"] * 5) + + def test_kde_colors(self): + pytest.importorskip("scipy") + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.kde(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.kde(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_kde_colors_and_styles_subplots(self): + pytest.importorskip("scipy") + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + axes = df.plot(kind="kde", subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["k", "red"]) + def test_kde_colors_and_styles_subplots_single_col_str(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(kind="kde", color=colormap, subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[colormap]) + + def test_kde_colors_and_styles_subplots_custom_color(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + axes = df.plot(kind="kde", color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_and_styles_subplots_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 
5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(kind="kde", colormap=colormap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_kde_colors_and_styles_subplots_single_col(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def test_kde_colors_and_styles_subplots_single_char(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + # single character style + axes = df.plot(kind="kde", style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_kde_colors_and_styles_subplots_list(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(kind="kde", style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_boxplot_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(return_type="dict") + _check_colors_box( + bp, + default_colors[0], + default_colors[0], + default_colors[2], + default_colors[0], + ) + + def test_boxplot_colors_dict_colors(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + dict_colors = { + "boxes": "#572923", + "whiskers": "#982042", + "medians": "#804823", + "caps": "#123456", + } + bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict") + _check_colors_box( + bp, + dict_colors["boxes"], + dict_colors["whiskers"], + dict_colors["medians"], + dict_colors["caps"], + "r", + ) + + def test_boxplot_colors_default_color(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # partial colors + dict_colors = {"whiskers": "c", "medians": "m"} + bp = df.plot.box(color=dict_colors, return_type="dict") + _check_colors_box(bp, default_colors[0], "c", "m", default_colors[0]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_boxplot_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(colormap=colormap, return_type="dict") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)] + _check_colors_box( + bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0] + ) + + def test_boxplot_colors_single(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # string color is applied to all artists except fliers + bp = df.plot.box(color="DodgerBlue", return_type="dict") + _check_colors_box(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue") + + def test_boxplot_colors_tuple(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # tuple is also applied to all artists except fliers + bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict") + _check_colors_box(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456") + + def test_boxplot_colors_invalid(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + msg = re.escape( + "color dict contains 
invalid key 'xxxx'. The key must be either " + "['boxes', 'whiskers', 'medians', 'caps']" + ) + with pytest.raises(ValueError, match=msg): + # A color dict containing an invalid key results in a ValueError + df.plot.box(color={"boxes": "red", "xxxx": "blue"}) + + def test_default_color_cycle(self): + import cycler + + colors = list("rgbk") + plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + ax = df.plot() + + expected = _unpack_cycler(plt.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) + + def test_no_color_bar(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", colorbar=None) + assert ax.collections[0].colorbar is None + + def test_mixing_cmap_and_colormap_raises(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + msg = "Only specify one of `cmap` and `colormap`" + with pytest.raises(TypeError, match=msg): + df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn") + + def test_passed_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + colormap = mpl.colors.ListedColormap(color_tuples) + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap) + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_rcParams_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}): + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar") + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_colors_of_columns_with_same_name(self): + # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136 + # Creating a DataFrame with duplicate column labels and testing their colors.
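+ # After the concat below the frame carries two 'a' columns; each plotted + # line should still get its own cycler color, matching its legend handle.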
+ df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + df1 = DataFrame({"a": [2, 4, 6]}) + df_concat = pd.concat([df, df1], axis=1) + result = df_concat.plot() + legend = result.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + for legend, line in zip(handles, result.lines): + assert legend.get_color() == line.get_color() + + def test_invalid_colormap(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 2)), columns=["A", "B"] + ) + msg = "(is not a valid value)|(is not a known colormap)" + with pytest.raises((ValueError, KeyError), match=msg): + df.plot(colormap="invalid_colormap") + + def test_dataframe_none_color(self): + # GH51953 + df = DataFrame([[1, 2, 3]]) + ax = df.plot(color=None) + expected = _unpack_cycler(mpl.pyplot.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..f1924185a3df1cae2f0df89ec84225cd68f8fa6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py @@ -0,0 +1,72 @@ +""" Test cases for DataFrame.plot """ + +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import _check_visible + +pytest.importorskip("matplotlib") + + +class TestDataFramePlotsGroupby: + def _assert_ytickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_yticklabels(), visible=exp) + + def _assert_xtickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_xticklabels(), visible=exp) + + @pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, False, True, False]), + # set sharey=True should be identical + ({"sharey": True}, [True, False, True, False]), + # sharey=False, all yticklabels should be visible + ({"sharey": False}, [True, True, True, True]), + ], + ) + def test_groupby_boxplot_sharey(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharey can now be switched check whether the right + # pair of axes is turned on or off + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_ytickslabels_visibility(axes, expected) + + @pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, True, True, True]), + # set sharex=False should be identical + ({"sharex": False}, [True, True, True, True]), + # sharex=True, xticklabels should be visible + # only for bottom plots + ({"sharex": True}, [False, False, True, True]), + ], + ) + def test_groupby_boxplot_sharex(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharex can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_xtickslabels_visibility(axes, expected) diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py new file mode 100644 index 0000000000000000000000000000000000000000..402a4b9531e5d4857d0d6e9d7cda2c002d0469d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py @@ -0,0 +1,272 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + date_range, +) +from pandas.tests.plotting.common import ( + _check_legend_labels, + _check_legend_marker, + _check_text_labels, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") + + +class TestFrameLegend: + @pytest.mark.xfail( + reason=( + "Open bug in matplotlib " + "https://github.com/matplotlib/matplotlib/issues/11357" + ) + ) + def test_mixed_yerr(self): + # https://github.com/pandas-dev/pandas/issues/39522 + from matplotlib.collections import LineCollection + from matplotlib.lines import Line2D + + df = DataFrame([{"x": 1, "a": 1, "b": 1}, {"x": 2, "a": 2, "b": 3}]) + + ax = df.plot("x", "a", c="orange", yerr=0.1, label="orange") + df.plot("x", "b", c="blue", yerr=None, ax=ax, label="blue") + + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + result_handles = legend.legendHandles + else: + result_handles = legend.legend_handles + + assert isinstance(result_handles[0], LineCollection) + assert isinstance(result_handles[1], Line2D) + + def test_legend_false(self): + # https://github.com/pandas-dev/pandas/issues/40044 + df = DataFrame({"a": [1, 1], "b": [2, 3]}) + df2 = DataFrame({"d": [2.5, 2.5]}) + + ax = df.plot(legend=True, color={"a": "blue", "b": "green"}, secondary_y="b") + df2.plot(legend=True, color={"d": "red"}, ax=ax) + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + result = [handle.get_color() for handle in handles] + expected = ["blue", "green", "red"] + assert result == expected + + @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"]) + def test_df_legend_labels(self, kind): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + df4 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["j", "k", "l"] + ) + + ax = df.plot(kind=kind, legend=True) + _check_legend_labels(ax, labels=df.columns) + + ax = df2.plot(kind=kind, legend=False, ax=ax) + _check_legend_labels(ax, labels=df.columns) + + ax = df3.plot(kind=kind, legend=True, ax=ax) + _check_legend_labels(ax, labels=df.columns.union(df3.columns)) + + ax = df4.plot(kind=kind, legend="reverse", ax=ax) + expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns)) + _check_legend_labels(ax, labels=expected) + + def test_df_legend_labels_secondary_y(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + # Secondary Y + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", 
"b (right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]) + + def test_df_legend_labels_time_series(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"]) + + def test_df_legend_labels_time_series_scatter(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + # scatter + ax = df.plot.scatter(x="a", y="b", label="data1") + _check_legend_labels(ax, labels=["data1"]) + ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax) + _check_legend_labels(ax, labels=["data1"]) + ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax) + _check_legend_labels(ax, labels=["data1", "data3"]) + + def test_df_legend_labels_time_series_no_mutate(self): + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + # ensure label args pass through and + # index name does not mutate + # column names don't mutate + df5 = df.set_index("a") + ax = df5.plot(y="b") + _check_legend_labels(ax, labels=["b"]) + ax = df5.plot(y="b", label="LABEL_b") + _check_legend_labels(ax, labels=["LABEL_b"]) + _check_text_labels(ax.xaxis.get_label(), "a") + ax = df5.plot(y="c", label="LABEL_c", ax=ax) + _check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"]) + assert df5.columns.tolist() == ["b", "c"] + + def test_missing_marker_multi_plots_on_same_ax(self): + # GH 18222 + df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]) + _, ax = mpl.pyplot.subplots(nrows=1, ncols=3) + # Left plot + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0]) + _check_legend_labels(ax[0], labels=["r", "g", "b"]) + _check_legend_marker(ax[0], expected_markers=["o", "x", "o"]) + # Center plot + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1]) + _check_legend_labels(ax[1], labels=["b", "r", "g"]) + _check_legend_marker(ax[1], expected_markers=["o", "o", "x"]) + # Right plot + df.plot(x="x", y="g", linewidth=1, 
marker="x", color="g", ax=ax[2]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2]) + _check_legend_labels(ax[2], labels=["g", "b", "r"]) + _check_legend_marker(ax[2], expected_markers=["x", "o", "o"]) + + def test_legend_name(self): + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])], + ) + multi.columns.names = ["group", "individual"] + + ax = multi.plot() + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df.columns.name = "new" + ax = df.plot(legend=False, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "new") + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "area", + "hist", + ], + ) + def test_no_legend(self, kind): + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + ax = df.plot(kind=kind, legend=False) + _check_legend_labels(ax, visible=False) + + def test_missing_markers_legend(self): + # 14958 + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), columns=["A", "B", "C"] + ) + ax = df.plot(y=["A"], marker="x", linestyle="solid") + df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax) + df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax) + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=["x", "o", "<"]) + + def test_missing_markers_legend_using_style(self): + # 14563 + df = DataFrame( + { + "A": [1, 2, 3, 4, 5, 6], + "B": [2, 4, 1, 3, 2, 4], + "C": [3, 3, 2, 6, 4, 2], + "X": [1, 2, 3, 4, 5, 6], + } + ) + + _, ax = mpl.pyplot.subplots() + for kind in "ABC": + df.plot("X", kind, label=kind, ax=ax, style=".") + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=[".", ".", "."]) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8d8fa4cdee38d568d099019e89114fb0cdb4e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py @@ -0,0 +1,752 @@ +""" Test cases for DataFrame.plot """ + +import string + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy import np_version_gte1p24 + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_legend_labels, + _check_ticks_props, + _check_visible, + _flatten_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlotsSubplots: + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots(self, kind): + df = DataFrame( 
+ np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + assert axes.shape == (3,) + + for ax, column in zip(axes, df.columns): + _check_legend_labels(ax, labels=[pprint_thing(column)]) + + for ax in axes[:-2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + if kind != "bar": + # change https://github.com/pandas-dev/pandas/issues/26714 + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_share_x(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, sharex=False) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_legend(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, legend=False) + for ax in axes: + assert ax.get_legend() is None + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + + axes = df.plot(kind=kind, subplots=True, sharex=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + + for ax in axes[:-2]: + # GH 7801 + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries_rot(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + _check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7) + + @pytest.mark.parametrize( + "col", ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"] + ) + def test_subplots_timeseries_y_axis(self, col): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "timedelta": [ + pd.Timedelta(-10, 
unit="s"), + pd.Timedelta(10, unit="m"), + pd.Timedelta(10, unit="h"), + ], + "datetime_no_tz": [ + pd.to_datetime("2017-08-01 00:00:00"), + pd.to_datetime("2017-08-01 02:00:00"), + pd.to_datetime("2017-08-02 00:00:00"), + ], + "datetime_all_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00", utc=True), + pd.to_datetime("2017-08-02 00:00:00", utc=True), + ], + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + + ax = testdata.plot(y=col) + result = ax.get_lines()[0].get_data()[1] + expected = testdata[col].values + assert (result == expected).all() + + def test_subplots_timeseries_y_text_error(self): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + testdata.plot(y="text") + + @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz") + def test_subplots_timeseries_y_axis_not_supported(self): + """ + This test will fail for: + period: + since period isn't yet implemented in ``select_dtypes`` + and because it will need a custom value converter + + tick formatter (as was done for x-axis plots) + + categorical: + because it will need a custom value converter + + tick formatter (also doesn't work for x-axis, as of now) + + datetime_mixed_tz: + because of the way how pandas handles ``Series`` of + ``datetime`` objects with different timezone, + generally converting ``datetime`` objects in a tz-aware + form could help with this problem + """ + data = { + "numeric": np.array([1, 2, 5]), + "period": [ + pd.Period("2017-08-01 00:00:00", freq="H"), + pd.Period("2017-08-01 02:00", freq="H"), + pd.Period("2017-08-02 00:00:00", freq="H"), + ], + "categorical": pd.Categorical( + ["c", "b", "a"], categories=["a", "b", "c"], ordered=False + ), + "datetime_mixed_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00"), + pd.to_datetime("2017-08-02 00:00:00"), + ], + } + testdata = DataFrame(data) + ax_period = testdata.plot(x="numeric", y="period") + assert ( + ax_period.get_lines()[0].get_data()[1] == testdata["period"].values + ).all() + ax_categorical = testdata.plot(x="numeric", y="categorical") + assert ( + ax_categorical.get_lines()[0].get_data()[1] + == testdata["categorical"].values + ).all() + ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz") + assert ( + ax_datetime_mixed_tz.get_lines()[0].get_data()[1] + == testdata["datetime_mixed_tz"].values + ).all() + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 2), (2, 2)], + [(-1, 2), (2, 2)], + [(2, -1), (2, 2)], + [(1, 4), (1, 4)], + [(-1, 4), (1, 4)], + [(4, -1), (4, 1)], + ], + ) + def test_subplots_layout_multi_column(self, layout, exp_layout): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, layout=layout) + _check_axes_shape(axes, axes_num=3, layout=exp_layout) + assert axes.shape == exp_layout + + def test_subplots_layout_multi_column_error(self): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "Layout of 1x1 must be larger than required size 3" + + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, layout=(1, 1)) + + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + 
df.plot(subplots=True, layout=(-1, -1)) + + @pytest.mark.parametrize( + "kwargs, expected_axes_num, expected_layout, expected_shape", + [ + ({}, 1, (1, 1), (1,)), + ({"layout": (3, 3)}, 1, (3, 3), (3, 3)), + ], + ) + def test_subplots_layout_single_column( + self, kwargs, expected_axes_num, expected_layout, expected_shape + ): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(subplots=True, **kwargs) + _check_axes_shape( + axes, + axes_num=expected_axes_num, + layout=expected_layout, + ) + assert axes.shape == expected_shape + + @pytest.mark.slow + @pytest.mark.parametrize("idx", [range(5), date_range("1/1/2000", periods=5)]) + def test_subplots_warnings(self, idx): + # GH 9464 + with tm.assert_produces_warning(None): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 4)), index=idx) + df.plot(subplots=True, layout=(3, 2)) + + def test_subplots_multiple_axes(self): + # GH 5353, 6970, GH 7069 + fig, axes = mpl.pyplot.subplots(2, 3) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + # draw on second row + returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + _check_axes_shape(axes, axes_num=6, layout=(2, 3)) + + def test_subplots_multiple_axes_error(self): + # GH 5353, 6970, GH 7069 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "The number of passed axes must be 3, the same as the output plot" + _, axes = mpl.pyplot.subplots(2, 3) + + with pytest.raises(ValueError, match=msg): + # pass a different number of axes than required + df.plot(subplots=True, ax=axes) + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 1), (2, 2)], + [(2, -1), (2, 2)], + [(-1, 2), (2, 2)], + ], + ) + def test_subplots_multiple_axes_2_dim(self, layout, exp_layout): + # GH 5353, 6970, GH 7069 + # pass 2-dim axes and invalid layout + # an invalid layout should not affect the input and return value + # (the warning is tested in + # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes) + _, axes = mpl.pyplot.subplots(2, 2) + df = DataFrame( + np.random.default_rng(2).random((10, 4)), + index=list(string.ascii_letters[:10]), + ) + with tm.assert_produces_warning(UserWarning): + returned = df.plot( + subplots=True, ax=axes, layout=layout, sharex=False, sharey=False + ) + _check_axes_shape(returned, axes_num=4, layout=exp_layout) + assert returned.shape == (4,) + + def test_subplots_multiple_axes_single_col(self): + # GH 5353, 6970, GH 7069 + # single column + _, axes = mpl.pyplot.subplots(1, 1) + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + assert axes.shape == (1,) + + def test_subplots_ts_share_axes(self): + # GH 3964 + _, axes = mpl.pyplot.subplots(3, 3, sharex=True, sharey=True) + mpl.pyplot.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), + index=date_range(start="2014-07-01", 
freq="ME", periods=10), + ) + for i, ax in enumerate(axes.ravel()): + df[i].plot(ax=ax, fontsize=5) + + # Rows other than bottom should not be visible + for ax in axes[0:-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=False) + + # Bottom row should be visible + for ax in axes[-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=True) + + # First column should be visible + for ax in axes[[0, 1, 2], [0]].ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + # Other columns should not be visible + for ax in axes[[0, 1, 2], [1]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + for ax in axes[[0, 1, 2], [2]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + + def test_subplots_sharex_axes_existing_axes(self): + # GH 9158 + d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]} + df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14")) + + axes = df[["A", "B"]].plot(subplots=True) + df["C"].plot(ax=axes[0], secondary_y=True) + + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + for ax in axes.ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + def test_subplots_dup_columns(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True) + for ax in axes: + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True, secondary_y="a") + for ax in axes: + # (right) is only attached when subplots=False + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y_no_subplot(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + ax = df.plot(secondary_y="a") + _check_legend_labels(ax, labels=["a (right)"] * 5) + assert len(ax.lines) == 0 + assert len(ax.right_ax.lines) == 5 + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_no_subplots(self): + # GH3254, GH3298 matplotlib/matplotlib#1882, #1892 + # regressions in 1.2.1 + expected = np.array([0.1, 1.0, 10.0, 100]) + + # no subplots + df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5)) + ax = df.plot.bar(grid=True, log=True) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_subplots(self): + expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4]) + + ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar( + log=True, subplots=True + ) + + tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected) + tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected) + + def test_boxplot_subplots_return_type_default(self, hist_df): + df = hist_df + + # normal style: return_type=None + result = df.plot.box(subplots=True) + assert isinstance(result, Series) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.parametrize("rt", ["dict", "axes", "both"]) + def test_boxplot_subplots_return_type(self, hist_df, rt): + df = hist_df + returned = df.plot.box(return_type=rt, subplots=True) + 
_check_box_return_type( + returned, + rt, + expected_keys=["height", "weight", "category"], + check_ax_title=False, + ) + + def test_df_subplots_patterns_minorticks(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + + # shared subplots + _, axes = plt.subplots(2, 1, sharex=True) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_1st_ax_hidden(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + _, axes = plt.subplots(2, 1) + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_not_shared(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + # not shared + _, axes = plt.subplots(2, 1) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_subplots_sharex_false(self): + # test when sharex is set to False, two plots should have different + # labels, GH 25160 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + df.iloc[5:, 1] = np.nan + df.iloc[:5, 0] = np.nan + + _, axs = mpl.pyplot.subplots(2, 1) + df.plot.line(ax=axs, subplots=True, sharex=False) + + expected_ax1 = np.arange(4.5, 10, 0.5) + expected_ax2 = np.arange(-0.5, 5, 0.5) + + tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1) + tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2) + + def test_subplots_constrained_layout(self): + # GH 25261 + idx = date_range(start="now", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + kwargs = {} + if hasattr(mpl.pyplot.Figure, "get_constrained_layout"): + kwargs["constrained_layout"] = True + _, axes = mpl.pyplot.subplots(2, **kwargs) + with tm.assert_produces_warning(None): + df.plot(ax=axes[0]) + with tm.ensure_clean(return_filelike=True) as path: + mpl.pyplot.savefig(path) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_subplots( + self, kind, index_name, old_label, new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name 
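+ # the parametrization covers str, int and list-valued labels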
+ + # by default the ylabel is not shown and the xlabel is the index name + axes = df.plot(kind=kind, subplots=True) + assert all(ax.get_ylabel() == "" for ax in axes) + assert all(ax.get_xlabel() == old_label for ax in axes) + + # the old xlabel is overridden and the assigned labels are used for both axes + axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True) + assert all(ax.get_ylabel() == str(new_label) for ax in axes) + assert all(ax.get_xlabel() == str(new_label) for ax in axes) + + @pytest.mark.parametrize( + "kwargs", + [ + # stacked center + {"kind": "bar", "stacked": True}, + {"kind": "bar", "stacked": True, "width": 0.9}, + {"kind": "barh", "stacked": True}, + {"kind": "barh", "stacked": True, "width": 0.9}, + # center + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": False, "width": 0.9}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": False, "width": 0.9}, + # subplots center + {"kind": "bar", "subplots": True}, + {"kind": "bar", "subplots": True, "width": 0.9}, + {"kind": "barh", "subplots": True}, + {"kind": "barh", "subplots": True, "width": 0.9}, + # align edge + {"kind": "bar", "stacked": True, "align": "edge"}, + {"kind": "bar", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": True, "align": "edge"}, + {"kind": "barh", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "bar", "stacked": False, "align": "edge"}, + {"kind": "bar", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": False, "align": "edge"}, + {"kind": "barh", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "bar", "subplots": True, "align": "edge"}, + {"kind": "bar", "subplots": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "subplots": True, "align": "edge"}, + {"kind": "barh", "subplots": True, "width": 0.9, "align": "edge"}, + ], + ) + def test_bar_align_multiple_columns(self, kwargs): + # GH2157 + df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_align_single_column(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_barwidth_position(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs) + + @pytest.mark.parametrize("w", [1, 1.0]) + def test_bar_barwidth_position_int(self, w): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(stacked=True, width=w) + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4])) + assert ax.get_xlim() == (-0.75, 4.75) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.5 + assert ax.patches[-1].get_x() == 3.5 + + @pytest.mark.parametrize( + "kind, kwargs", + [ + ["bar", {"stacked": True}], + ["barh", {"stacked": False}], + ["barh", {"stacked": 
True}], + ["bar", {"subplots": True}], + ["barh", {"subplots": True}], + ], + ) + def test_bar_barwidth_position_int_width_1(self, kind, kwargs): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, kind=kind, width=1, **kwargs) + + def _check_bar_alignment( + self, + df, + kind="bar", + stacked=False, + subplots=False, + align="center", + width=0.5, + position=0.5, + ): + axes = df.plot( + kind=kind, + stacked=stacked, + subplots=subplots, + align=align, + width=width, + position=position, + grid=True, + ) + + axes = _flatten_visible(axes) + + for ax in axes: + if kind == "bar": + axis = ax.xaxis + ax_min, ax_max = ax.get_xlim() + min_edge = min(p.get_x() for p in ax.patches) + max_edge = max(p.get_x() + p.get_width() for p in ax.patches) + elif kind == "barh": + axis = ax.yaxis + ax_min, ax_max = ax.get_ylim() + min_edge = min(p.get_y() for p in ax.patches) + max_edge = max(p.get_y() + p.get_height() for p in ax.patches) + else: + raise ValueError + + # GH 7498 + # compare margins between lim and bar edges + tm.assert_almost_equal(ax_min, min_edge - 0.25) + tm.assert_almost_equal(ax_max, max_edge + 0.25) + + p = ax.patches[0] + if kind == "bar" and (stacked is True or subplots is True): + edge = p.get_x() + center = edge + p.get_width() * position + elif kind == "bar" and stacked is False: + center = p.get_x() + p.get_width() * len(df.columns) * position + edge = p.get_x() + elif kind == "barh" and (stacked is True or subplots is True): + center = p.get_y() + p.get_height() * position + edge = p.get_y() + elif kind == "barh" and stacked is False: + center = p.get_y() + p.get_height() * len(df.columns) * position + edge = p.get_y() + else: + raise ValueError + + # Check that the tick locations are integers + assert (axis.get_ticklocs() == np.arange(len(df))).all() + + if align == "center": + # Check whether the bar is centered on the tick + tm.assert_almost_equal(axis.get_ticklocs()[0], center) + elif align == "edge": + # Check whether the bar's edge starts at the tick + tm.assert_almost_equal(axis.get_ticklocs()[0], edge) + else: + raise ValueError + + return axes diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py new file mode 100644 index 0000000000000000000000000000000000000000..a9250fa8347cc04fa34c28b016e1fb27d837284f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py @@ -0,0 +1,342 @@ +import re + +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_plot_works, + get_x_axis, + get_y_axis, +) + +pytest.importorskip("matplotlib") + + +@pytest.fixture +def hist_df(): + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), columns=["A", "B"] + ) + df["C"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + df["D"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + return df + + +class TestHistWithBy: + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + ("C", "A", ["a", "b", "c"], [["A"]] * 3), + ("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3), + ("C", None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + ["C", "D"], + "A", + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ( + ["C", "D"], + ["A", "B"], + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", 
"B"]] * 3, + ), + ( + ["C", "D"], + None, + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", "B"]] * 3, + ), + ], + ) + def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, default_axes=True + ) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + (0, "A", ["a", "b", "c"], [["A"]] * 3), + (0, None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + [0, "D"], + "A", + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ], + ) + def test_hist_plot_by_0(self, by, column, titles, legends, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.hist, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ([], ["A", "B"]), + ((), None), + ((), ["A", "B"]), + ], + ) + def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + _check_plot_works( + hist_df.plot.hist, default_axes=True, column=column, by=by + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (2, 2), 3), + ("C", "A", (2, 2), 3), + (["C"], ["A"], (1, 3), 3), + ("C", None, (3, 1), 3), + ("C", ["A", "B"], (3, 1), 3), + (["C", "D"], "A", (9, 1), 3), + (["C", "D"], "A", (3, 3), 3), + (["C", "D"], ["A"], (5, 2), 3), + (["C", "D"], ["A", "B"], (9, 1), 3), + (["C", "D"], None, (9, 1), 3), + (["C", "D"], ["A", "B"], (5, 2), 3), + ], + ) + def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + # _check_plot_works adds an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.hist(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.slow + def test_axis_share_x_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + assert get_x_axis(ax3).joined(ax1, ax3) + assert get_x_axis(ax3).joined(ax2, ax3) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + assert not get_y_axis(ax3).joined(ax1, ax3) + assert not get_y_axis(ax3).joined(ax2, ax3) + + @pytest.mark.slow + def test_axis_share_y_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + assert get_y_axis(ax3).joined(ax1, ax3) + assert get_y_axis(ax3).joined(ax2, ax3) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + assert not get_x_axis(ax3).joined(ax1, ax3) + assert not get_x_axis(ax3).joined(ax2, ax3) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = hist_df.plot.hist(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=3, figsize=figsize) + + +class TestBoxWithBy: + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + ("C", "A", ["A"], [["a", "b", "c"]]), + ( + ["C", "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + ("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2), + ( + ["C", "D"], + ["A", "B"], + ["A", "B"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ] + * 2, + ), + (["C"], None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by + ) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + (0, "A", ["A"], [["a", "b", "c"]]), + ( + [0, "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + (0, None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.box, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in 
ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ((), "A"), + ([], None), + ((), ["A", "B"]), + ], + ) + def test_box_plot_with_none_empty_list_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + _check_plot_works(hist_df.plot.box, default_axes=True, column=column, by=by) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (1, 1), 1), + ("C", "A", (1, 1), 1), + ("C", None, (2, 1), 2), + ("C", ["A", "B"], (1, 2), 2), + (["C", "D"], "A", (1, 1), 1), + (["C", "D"], None, (1, 2), 2), + ], + ) + def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.box(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = hist_df.plot.box(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=1, figsize=figsize) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8643415ae12f96bbbd87ed85ff74f8813b07e4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py @@ -0,0 +1,98 @@ +import sys +import types + +import pytest + +import pandas.util._test_decorators as td + +import pandas + + +@pytest.fixture +def dummy_backend(): + db = types.ModuleType("pandas_dummy_backend") + setattr(db, "plot", lambda *args, **kwargs: "used_dummy") + return db + + +@pytest.fixture +def restore_backend(): + """Restore the plotting backend to matplotlib""" + with pandas.option_context("plotting.backend", "matplotlib"): + yield + + +def test_backend_is_not_module(): + msg = "Could not find plotting backend 'not_an_existing_module'." 
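+ # the failed set_option must leave the existing backend option untouched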
+ with pytest.raises(ValueError, match=msg): + pandas.set_option("plotting.backend", "not_an_existing_module") + + assert pandas.options.plotting.backend == "matplotlib" + + +def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + pandas.set_option("plotting.backend", "pandas_dummy_backend") + assert pandas.get_option("plotting.backend") == "pandas_dummy_backend" + assert ( + pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend + ) + + +def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + df = pandas.DataFrame([1, 2, 3]) + + assert pandas.get_option("plotting.backend") == "matplotlib" + assert df.plot(backend="pandas_dummy_backend") == "used_dummy" + + +def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend): + monkeypatch.syspath_prepend(tmp_path) + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + dist_info = tmp_path / "my_backend-0.0.0.dist-info" + dist_info.mkdir() + # entry_point name should not match module name - otherwise pandas will + # fall back to backend lookup by module name + (dist_info / "entry_points.txt").write_bytes( + b"[pandas_plotting_backends]\nmy_ep_backend = pandas_dummy_backend\n" + ) + + assert pandas.plotting._core._get_plot_backend("my_ep_backend") is dummy_backend + + with pandas.option_context("plotting.backend", "my_ep_backend"): + assert pandas.plotting._core._get_plot_backend() is dummy_backend + + +def test_setting_backend_without_plot_raises(monkeypatch): + # GH-28163 + module = types.ModuleType("pandas_plot_backend") + monkeypatch.setitem(sys.modules, "pandas_plot_backend", module) + + assert pandas.options.plotting.backend == "matplotlib" + with pytest.raises( + ValueError, match="Could not find plotting backend 'pandas_plot_backend'." + ): + pandas.set_option("plotting.backend", "pandas_plot_backend") + + assert pandas.options.plotting.backend == "matplotlib" + + +@td.skip_if_installed("matplotlib") +def test_no_matplotlib_ok(): + msg = ( + 'matplotlib is required for plotting when the default backend "matplotlib" is ' + "selected." 
+ ) + with pytest.raises(ImportError, match=msg): + pandas.plotting._core._get_plot_backend("matplotlib") + + +def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend): + # https://github.com/pandas-dev/pandas/pull/28647 + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + pandas.set_option("plotting.backend", "pandas_dummy_backend") + df = pandas.DataFrame({"A": [1, 2, 3]}) + df.plot(kind="not a real kind") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py new file mode 100644 index 0000000000000000000000000000000000000000..76f7fa1f22eec4bdb7464619226352c918d31a02 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py @@ -0,0 +1,761 @@ +""" Test cases for .boxplot method """ + +import itertools +import string + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, + plotting, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_plot_works, + _check_ticks_props, + _check_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +def _check_ax_limits(col, ax): + y_min, y_max = ax.get_ylim() + assert y_min <= col.min() + assert y_max >= col.max() + + +class TestDataFramePlots: + def test_stacked_boxplot_set_axis(self): + # GH2980 + import matplotlib.pyplot as plt + + n = 80 + df = DataFrame( + { + "Clinical": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Confirmed": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Discarded": np.random.default_rng(2).choice([0, 1, 2, 3], n), + }, + index=np.arange(0, n), + ) + ax = df.plot(kind="bar", stacked=True) + assert [int(x.get_text()) for x in ax.get_xticklabels()] == df.index.to_list() + ax.set_xticks(np.arange(0, 80, 10)) + plt.draw() # Update changes + assert [int(x.get_text()) for x in ax.get_xticklabels()] == list( + np.arange(0, 80, 10) + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, warn", + [ + [{"return_type": "dict"}, None], + [{"column": ["one", "two"]}, None], + [{"column": ["one", "two"], "by": "indic"}, UserWarning], + [{"column": ["one"], "by": ["indic", "indic2"]}, None], + [{"by": "indic"}, UserWarning], + [{"by": ["indic", "indic2"]}, UserWarning], + [{"notch": 1}, None], + [{"by": "indic", "notch": 1}, UserWarning], + ], + ) + def test_boxplot_legacy1(self, kwargs, warn): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + df["indic"] = ["foo", "bar"] * 3 + df["indic2"] = ["foo", "bar", "foo"] * 2 + + # _check_plot_works can add an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(warn, check_stacklevel=False): + _check_plot_works(df.boxplot, **kwargs) + + def test_boxplot_legacy1_series(self): + ser = Series(np.random.default_rng(2).standard_normal(6)) + _check_plot_works(plotting._core.boxplot, data=ser, return_type="dict") + + def test_boxplot_legacy2(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.boxplot, by="X") + + def test_boxplot_legacy2_with_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + _, ax = mpl.pyplot.subplots() + axes = df.boxplot("Col1", by="X", ax=ax) + ax_axes = ax.axes + assert ax_axes is axes + + def test_boxplot_legacy2_with_ax_return_type(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + fig, ax = mpl.pyplot.subplots() + axes = df.groupby("Y").boxplot(ax=ax, return_type="axes") + ax_axes = ax.axes + assert ax_axes is axes["A"] + + def test_boxplot_legacy2_with_multi_col(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # Multiple columns with an ax argument should use same figure + fig, ax = mpl.pyplot.subplots() + with tm.assert_produces_warning(UserWarning): + axes = df.boxplot( + column=["Col1", "Col2"], by="X", ax=ax, return_type="axes" + ) + assert axes["Col1"].get_figure() is fig + + def test_boxplot_legacy2_by_none(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When by is None, check that all relevant lines are present in the + # dict + _, ax = mpl.pyplot.subplots() + d = df.boxplot(ax=ax, return_type="dict") + lines = list(itertools.chain.from_iterable(d.values())) + assert len(ax.get_lines()) == len(lines) + + def test_boxplot_return_type_none(self, hist_df): + # GH 12216; return_type=None & by=None -> axes + result = hist_df.boxplot() + assert isinstance(result, mpl.pyplot.Axes) + + def test_boxplot_return_type_legacy(self): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.boxplot(return_type="NOT_A_TYPE") + + result = df.boxplot() + _check_box_return_type(result, "axes") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_legacy_return_type(self, return_type): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + with tm.assert_produces_warning(False): + 
result = df.boxplot(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_boxplot_axis_limits(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # One full row + height_ax, weight_ax = df.boxplot(["height", "weight"], by="category") + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + assert weight_ax._sharey == height_ax + + def test_boxplot_axis_limits_two_rows(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # Two rows, one partial + p = df.boxplot(["height", "weight", "age"], by="category") + height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0] + dummy_ax = p[1, 1] + + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + _check_ax_limits(df["age"], age_ax) + assert weight_ax._sharey == height_ax + assert age_ax._sharey == height_ax + assert dummy_ax._sharey is None + + def test_boxplot_empty_column(self): + df = DataFrame(np.random.default_rng(2).standard_normal((20, 4))) + df.loc[:, 0] = np.nan + _check_plot_works(df.boxplot, return_type="axes") + + def test_figsize(self): + df = DataFrame( + np.random.default_rng(2).random((10, 5)), columns=["A", "B", "C", "D", "E"] + ) + result = df.boxplot(return_type="axes", figsize=(12, 8)) + assert result.figure.bbox_inches.width == 12 + assert result.figure.bbox_inches.height == 8 + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6]}) + _check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16) + + def test_boxplot_numeric_data(self): + # GH 22799 + df = DataFrame( + { + "a": date_range("2012-01-01", periods=100), + "b": np.random.default_rng(2).standard_normal(100), + "c": np.random.default_rng(2).standard_normal(100) + 2, + "d": date_range("2012-01-01", periods=100).astype(str), + "e": date_range("2012-01-01", periods=100, tz="UTC"), + "f": timedelta_range("1 days", periods=100), + } + ) + ax = df.plot(kind="box") + assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"] + + @pytest.mark.parametrize( + "colors_kwd, expected", + [ + ( + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + ), + ({"boxes": "r"}, {"boxes": "r"}), + ("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}), + ], + ) + def test_color_kwd(self, colors_kwd, expected): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + result = df.boxplot(color=colors_kwd, return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "scheme,expected", + [ + ( + "dark_background", + { + "boxes": "#8dd3c7", + "whiskers": "#8dd3c7", + "medians": "#bfbbd9", + "caps": "#8dd3c7", + }, + ), + ( + "default", + { + "boxes": "#1f77b4", + "whiskers": "#1f77b4", + "medians": "#2ca02c", + "caps": "#1f77b4", + }, + ), + ], + ) + def test_colors_in_theme(self, scheme, expected): + # GH: 40769 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + import matplotlib.pyplot as plt + + plt.style.use(scheme) + result = df.plot.box(return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "dict_colors, msg", + [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")], + ) + def test_color_kwd_errors(self, dict_colors, msg): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 
2))) + with pytest.raises(ValueError, match=msg): + df.boxplot(color=dict_colors, return_type="dict") + + @pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(10) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.boxplot(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.plot(kind="box", vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_box(self, vert): + # GH 54941 + rng = np.random.default_rng(2) + df1 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + df2 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + + xlabel, ylabel = "x", "y" + _, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True) + df1.plot.box(ax=axs[0], vert=vert, xlabel=xlabel, ylabel=ylabel) + df2.plot.box(ax=axs[1], vert=vert, xlabel=xlabel, ylabel=ylabel) + for ax in axs: + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(by="group", vert=vert, xlabel=xlabel, ylabel=ylabel) + for subplot in ax: + assert subplot.get_xlabel() == xlabel + assert subplot.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_no_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + ax = df.boxplot(by="group", vert=vert) + for subplot in ax: + target_label = subplot.get_xlabel() if vert else subplot.get_ylabel() + assert target_label == pprint_thing(["group"]) + mpl.pyplot.close() + + +class TestDataFrameGroupByPlots: + def test_boxplot_legacy1(self, hist_df): + grouped = hist_df.groupby(by="gender") + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2)) + + def test_boxplot_legacy1_return_type(self, hist_df): + grouped = 
hist_df.groupby(by="gender") + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_boxplot_legacy2(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3)) + + @pytest.mark.slow + def test_boxplot_legacy2_return_type(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize( + "subplots, warn, axes_num, layout", + [[True, UserWarning, 3, (2, 2)], [False, None, 1, (1, 1)]], + ) + def test_boxplot_legacy3(self, subplots, warn, axes_num, layout): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df.unstack(level=1).groupby(level=0, axis=1) + with tm.assert_produces_warning(warn, check_stacklevel=False): + axes = _check_plot_works( + grouped.boxplot, subplots=subplots, return_type="axes" + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_plot_fignums(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + gb = df.groupby("gender") + + res = gb.plot() + assert len(mpl.pyplot.get_fignums()) == 2 + assert len(res) == 2 + plt.close("all") + + res = gb.boxplot(return_type="axes") + assert len(mpl.pyplot.get_fignums()) == 1 + assert len(res) == 2 + + def test_grouped_plot_fignums_excluded_col(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + # now works with GH 5610 as gender is excluded + df.groupby("gender").hist() + + @pytest.mark.slow + def test_grouped_box_return_type(self, hist_df): + df = hist_df + + # old style: return_type=None + result = df.boxplot(by="gender") + assert isinstance(result, np.ndarray) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.slow + def test_grouped_box_return_type_groupby(self, hist_df): + df = hist_df + # now for groupby + result = df.groupby("gender").boxplot(return_type="dict") + _check_box_return_type(result, "dict", expected_keys=["Male", "Female"]) + + @pytest.mark.slow + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_grouped_box_return_type_arg(self, hist_df, return_type): + df = hist_df + + returned = df.groupby("classroom").boxplot(return_type=return_type) + 
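# [illustrative aside, editorial; not from the original module] return_type
+        # semantics exercised here: "dict" yields a dict of matplotlib artist
+        # lists (keys such as "boxes", "whiskers", "medians", "caps"),
+        # "axes" yields the Axes drawn on, and "both" yields a namedtuple
+        # with .ax and .lines, e.g. (a minimal sketch):
+        #   res = DataFrame(np.ones((4, 2))).boxplot(return_type="both")
+        #   assert hasattr(res, "ax") and hasattr(res, "lines")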
+        _check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"])
+
+        returned = df.boxplot(by="classroom", return_type=return_type)
+        _check_box_return_type(
+            returned, return_type, expected_keys=["height", "weight", "category"]
+        )
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
+    def test_grouped_box_return_type_arg_duplicate_cats(self, return_type):
+        columns2 = "X B C D A".split()
+        df2 = DataFrame(
+            np.random.default_rng(2).standard_normal((6, 5)), columns=columns2
+        )
+        categories2 = "A B".split()
+        df2["category"] = categories2 * 3
+
+        returned = df2.groupby("category").boxplot(return_type=return_type)
+        _check_box_return_type(returned, return_type, expected_keys=categories2)
+
+        returned = df2.boxplot(by="category", return_type=return_type)
+        _check_box_return_type(returned, return_type, expected_keys=columns2)
+
+    @pytest.mark.slow
+    def test_grouped_box_layout_too_small(self, hist_df):
+        df = hist_df
+
+        msg = "Layout of 1x1 must be larger than required size 2"
+        with pytest.raises(ValueError, match=msg):
+            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
+
+    @pytest.mark.slow
+    def test_grouped_box_layout_needs_by(self, hist_df):
+        df = hist_df
+        msg = "The 'layout' keyword is not supported when 'by' is None"
+        with pytest.raises(ValueError, match=msg):
+            df.boxplot(
+                column=["height", "weight", "category"],
+                layout=(2, 1),
+                return_type="dict",
+            )
+
+    @pytest.mark.slow
+    def test_grouped_box_layout_positive_layout(self, hist_df):
+        df = hist_df
+        msg = "At least one dimension of layout must be positive"
+        with pytest.raises(ValueError, match=msg):
+            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "gb_key, axes_num, rows",
+        [["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]],
+    )
+    def test_grouped_box_layout_positive_layout_axes(
+        self, hist_df, gb_key, axes_num, rows
+    ):
+        df = hist_df
+        # _check_plot_works adds an ax, so catch the warning;
see GH #13188 GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby(gb_key).boxplot, column="height", return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "col, visible", [["height", False], ["weight", True], ["category", True]] + ) + def test_grouped_box_layout_visible(self, hist_df, col, visible): + df = hist_df + # GH 5897 + axes = df.boxplot( + column=["height", "weight", "category"], by="gender", return_type="axes" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + ax = axes[col] + _check_visible(ax.get_xticklabels(), visible=visible) + _check_visible([ax.xaxis.get_label()], visible=visible) + + @pytest.mark.slow + def test_grouped_box_layout_shape(self, hist_df): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols", [2, -1]) + def test_grouped_box_layout_works(self, hist_df, cols): + df = hist_df + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby("category").boxplot, + column="height", + layout=(3, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res): + df = hist_df + df.boxplot( + column=["height", "weight", "category"], by="gender", layout=(rows, 1) + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], + layout=(1, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + # check warning to ignore sharex / sharey + # this check should be done in the first function which + # passes multiple axes to plot, hist or boxplot + # location should be changed if other test is added + # which has earlier alphabetical order + with tm.assert_produces_warning(UserWarning): + _, axes = mpl.pyplot.subplots(2, 2) + df.groupby("category").boxplot(column="height", return_type="axes", ax=axes) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes_on_fig(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + with tm.assert_produces_warning(UserWarning): + returned = df.boxplot( + column=["height", "weight", "category"], + by="gender", + return_type="axes", + ax=axes[0], + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + # draw on second row + with tm.assert_produces_warning(UserWarning): + returned = df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="axes", ax=axes[1] + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, 
layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + @pytest.mark.slow + def test_grouped_box_multiple_axes_ax_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + msg = "The number of passed axes must be 3, the same as the output plot" + with pytest.raises(ValueError, match=msg): + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + with tm.assert_produces_warning(UserWarning): + axes = df.groupby("classroom").boxplot(ax=axes) + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}) + _check_ticks_props( + df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16 + ) + + @pytest.mark.parametrize( + "col, expected_xticklabel", + [ + ("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + (["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + ("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]), + ( + ["v", "v1"], + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ( + None, + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ], + ) + def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel): + # GH 16748 + df = DataFrame( + { + "cat": np.random.default_rng(2).choice(list("abcde"), 100), + "v": np.random.default_rng(2).random(100), + "v1": np.random.default_rng(2).random(100), + } + ) + grouped = df.groupby("cat") + + axes = _check_plot_works( + grouped.boxplot, subplots=False, column=col, return_type="axes" + ) + + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel + + def test_groupby_boxplot_object(self, hist_df): + # GH 43480 + df = hist_df.astype("object") + grouped = df.groupby("gender") + msg = "boxplot method requires numerical columns, nothing to plot" + with pytest.raises(ValueError, match=msg): + _check_plot_works(grouped.boxplot, subplots=False) + + def test_boxplot_multiindex_column(self): + # GH 16748 + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = list(zip(*arrays)) + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 8)), + index=["A", "B", "C"], + columns=index, + ) + + col = [("bar", "one"), ("bar", "two")] + axes = _check_plot_works(df.boxplot, column=col, return_type="axes") + + expected_xticklabel = ["(bar, one)", "(bar, two)"] + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..20daf5935624843af3224f991497f84fa6639a0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py @@ -0,0 +1,60 @@ +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import ( + _check_plot_works, + _check_ticks_props, + _gen_two_subplots, +) + +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestCommon: + def test__check_ticks_props(self): + # GH 34768 + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + ax = 
_check_plot_works(df.plot, rot=30)
+        ax.yaxis.set_tick_params(rotation=30)
+        msg = "expected 0.00000 but got "
+        with pytest.raises(AssertionError, match=msg):
+            _check_ticks_props(ax, xrot=0)
+        with pytest.raises(AssertionError, match=msg):
+            _check_ticks_props(ax, xlabelsize=0)
+        with pytest.raises(AssertionError, match=msg):
+            _check_ticks_props(ax, yrot=0)
+        with pytest.raises(AssertionError, match=msg):
+            _check_ticks_props(ax, ylabelsize=0)
+
+    def test__gen_two_subplots_with_ax(self):
+        fig = plt.gcf()
+        gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test")
+        # On the first yield, no subplot should be added since ax was passed
+        next(gen)
+        assert fig.get_axes() == []
+        # On the second, the one axis should match fig.subplot(2, 1, 2)
+        next(gen)
+        axes = fig.get_axes()
+        assert len(axes) == 1
+        subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1])
+        subplot_geometry[-1] += 1
+        assert subplot_geometry == [2, 1, 2]
+
+    def test_colorbar_layout(self):
+        fig = plt.figure()
+
+        axes = fig.subplot_mosaic(
+            """
+            AB
+            CC
+            """
+        )
+
+        x = [1, 2, 3]
+        y = [1, 2, 3]
+
+        cs0 = axes["A"].scatter(x, y)
+        axes["B"].scatter(x, y)
+
+        fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right")
+        DataFrame(x).plot(ax=axes["C"])
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..f748d7c5fc758045fc5d3475b94e376a06f5269b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py
@@ -0,0 +1,410 @@
+from datetime import (
+    date,
+    datetime,
+)
+import subprocess
+import sys
+
+import numpy as np
+import pytest
+
+import pandas._config.config as cf
+
+from pandas._libs.tslibs import to_offset
+
+from pandas import (
+    Index,
+    Period,
+    PeriodIndex,
+    Series,
+    Timestamp,
+    arrays,
+    date_range,
+)
+import pandas._testing as tm
+
+from pandas.plotting import (
+    deregister_matplotlib_converters,
+    register_matplotlib_converters,
+)
+from pandas.tseries.offsets import (
+    Day,
+    Micro,
+    Milli,
+    Second,
+)
+
+try:
+    from pandas.plotting._matplotlib import converter
+except ImportError:
+    # try / except, rather than skip, to avoid internal refactoring
+    # causing an improper skip
+    pass
+
+pytest.importorskip("matplotlib.pyplot")
+dates = pytest.importorskip("matplotlib.dates")
+
+
+@pytest.mark.single_cpu
+def test_registry_mpl_resets():
+    # Check that Matplotlib converters are properly reset (see issue #27481)
+    code = (
+        "import matplotlib.units as units; "
+        "import matplotlib.dates as mdates; "
+        "n_conv = len(units.registry); "
+        "import pandas as pd; "
+        "pd.plotting.register_matplotlib_converters(); "
+        "pd.plotting.deregister_matplotlib_converters(); "
+        "assert len(units.registry) == n_conv"
+    )
+    call = [sys.executable, "-c", code]
+    subprocess.check_output(call)
+
+
+def test_time2num_accepts_unicode():
+    assert converter.time2num("00:01") == converter.time2num("00:01")
+
+
+class TestRegistration:
+    @pytest.mark.single_cpu
+    def test_dont_register_by_default(self):
+        # Run in subprocess to ensure a clean state
+        code = (
+            "import matplotlib.units; "
+            "import pandas as pd; "
+            "units = dict(matplotlib.units.registry); "
+            "assert pd.Timestamp not in units"
+        )
+        call = [sys.executable, "-c", code]
+        assert subprocess.check_call(call) == 0
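+
+    def test_register_idempotent_sketch(self):
+        # [editorial sketch, not part of the original pandas suite] calling
+        # register_matplotlib_converters() twice should still leave a
+        # converter entry for Timestamp in matplotlib's unit registry;
+        # deregistering afterwards restores the prior state.
+        units = pytest.importorskip("matplotlib.units")
+        register_matplotlib_converters()
+        register_matplotlib_converters()
+        assert Timestamp in units.registry
+        deregister_matplotlib_converters()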
+
+    def test_registering_no_warning(self):
+        plt = pytest.importorskip("matplotlib.pyplot")
+        s = Series(range(12), index=date_range("2017", periods=12))
+        _, ax = plt.subplots()
+
+        # Set to the "warn" state, in case this isn't the first test run
+        register_matplotlib_converters()
+        ax.plot(s.index, s.values)
+        plt.close()
+
+    def test_pandas_plots_register(self):
+        plt = pytest.importorskip("matplotlib.pyplot")
+        s = Series(range(12), index=date_range("2017", periods=12))
+        # Set to the "warn" state, in case this isn't the first test run
+        with tm.assert_produces_warning(None) as w:
+            s.plot()
+
+        try:
+            assert len(w) == 0
+        finally:
+            plt.close()
+
+    def test_matplotlib_formatters(self):
+        units = pytest.importorskip("matplotlib.units")
+
+        # Can't make any assertion about the start state.
+        # We check that toggling converters off removes it, and toggling it
+        # on restores it.
+
+        with cf.option_context("plotting.matplotlib.register_converters", True):
+            with cf.option_context("plotting.matplotlib.register_converters", False):
+                assert Timestamp not in units.registry
+            assert Timestamp in units.registry
+
+    def test_option_no_warning(self):
+        pytest.importorskip("matplotlib.pyplot")
+        ctx = cf.option_context("plotting.matplotlib.register_converters", False)
+        plt = pytest.importorskip("matplotlib.pyplot")
+        s = Series(range(12), index=date_range("2017", periods=12))
+        _, ax = plt.subplots()
+
+        # Test without registering first, no warning
+        with ctx:
+            ax.plot(s.index, s.values)
+
+        # Now test with registering
+        register_matplotlib_converters()
+        with ctx:
+            ax.plot(s.index, s.values)
+        plt.close()
+
+    def test_registry_resets(self):
+        units = pytest.importorskip("matplotlib.units")
+        dates = pytest.importorskip("matplotlib.dates")
+
+        # make a copy, to reset to
+        original = dict(units.registry)
+
+        try:
+            # get to a known state
+            units.registry.clear()
+            date_converter = dates.DateConverter()
+            units.registry[datetime] = date_converter
+            units.registry[date] = date_converter
+
+            register_matplotlib_converters()
+            assert units.registry[date] is not date_converter
+            deregister_matplotlib_converters()
+            assert units.registry[date] is date_converter
+
+        finally:
+            # restore original state
+            units.registry.clear()
+            for k, v in original.items():
+                units.registry[k] = v
+
+
+class TestDateTimeConverter:
+    @pytest.fixture
+    def dtc(self):
+        return converter.DatetimeConverter()
+
+    def test_convert_accepts_unicode(self, dtc):
+        r1 = dtc.convert("2000-01-01 12:22", None, None)
+        r2 = dtc.convert("2000-01-01 12:22", None, None)
+        assert r1 == r2, "DatetimeConverter.convert should accept unicode"
+
+    def test_conversion(self, dtc):
+        rs = dtc.convert(["2012-1-1"], None, None)[0]
+        xp = dates.date2num(datetime(2012, 1, 1))
+        assert rs == xp
+
+        rs = dtc.convert("2012-1-1", None, None)
+        assert rs == xp
+
+        rs = dtc.convert(date(2012, 1, 1), None, None)
+        assert rs == xp
+
+        rs = dtc.convert("2012-1-1", None, None)
+        assert rs == xp
+
+        rs = dtc.convert(Timestamp("2012-1-1"), None, None)
+        assert rs == xp
+
+        # also testing datetime64 dtype (GH8614)
+        rs = dtc.convert("2012-01-01", None, None)
+        assert rs == xp
+
+        rs = dtc.convert("2012-01-01 00:00:00+0000", None, None)
+        assert rs == xp
+
+        rs = dtc.convert(
+            np.array(["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"]),
+            None,
+            None,
+        )
+        assert rs[0] == xp
+
+        # we have a tz-aware date (constructed so that when converted to UTC
+        # it is the same as our sample)
+        ts = Timestamp("2012-01-01").tz_localize("UTC").tz_convert("US/Eastern")
+        rs = dtc.convert(ts, None, None)
+        assert rs 
== xp + + rs = dtc.convert(ts.to_pydatetime(), None, None) + assert rs == xp + + rs = dtc.convert(Index([ts - Day(1), ts]), None, None) + assert rs[1] == xp + + rs = dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None) + assert rs[1] == xp + + def test_conversion_float(self, dtc): + rtol = 0.5 * 10**-9 + + rs = dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None) + xp = converter.mdates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC")) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert( + Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None + ) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize( + "values", + [ + [date(1677, 1, 1), date(1677, 1, 2)], + [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)], + ], + ) + def test_conversion_outofbounds_datetime(self, dtc, values): + # 2579 + rs = dtc.convert(values, None, None) + xp = converter.mdates.date2num(values) + tm.assert_numpy_array_equal(rs, xp) + rs = dtc.convert(values[0], None, None) + xp = converter.mdates.date2num(values[0]) + assert rs == xp + + @pytest.mark.parametrize( + "time,format_expected", + [ + (0, "00:00"), # time2num(datetime.time.min) + (86399.999999, "23:59:59.999999"), # time2num(datetime.time.max) + (90000, "01:00"), + (3723, "01:02:03"), + (39723.2, "11:02:03.200"), + ], + ) + def test_time_formatter(self, time, format_expected): + # issue 18478 + result = converter.TimeFormatter(None)(time) + assert result == format_expected + + @pytest.mark.parametrize("freq", ("B", "ms", "s")) + def test_dateindex_conversion(self, freq, dtc): + rtol = 10**-9 + dateindex = date_range("2020-01-01", periods=10, freq=freq) + rs = dtc.convert(dateindex, None, None) + xp = converter.mdates.date2num(dateindex._mpl_repr()) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize("offset", [Second(), Milli(), Micro(50)]) + def test_resolution(self, offset, dtc): + # Matplotlib's time representation using floats cannot distinguish + # intervals smaller than ~10 microsecond in the common range of years. 
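+        # [editorial worked example] with matplotlib's pre-3.3 epoch
+        # (year 0001), date2num(2012-01-01) is ~734503 days; 734503 lies in
+        # [2**19, 2**20), so the float64 spacing (ulp) there is
+        # 2**(19 - 52) = 2**-33 days ~ 1.16e-10 days ~ 1.0e-5 s, i.e. the
+        # ~10 microsecond figure quoted above. Hence Micro(50) below is
+        # resolvable, while, e.g., Micro(5) would not reliably be.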
+        ts1 = Timestamp("2012-1-1")
+        ts2 = ts1 + offset
+        val1 = dtc.convert(ts1, None, None)
+        val2 = dtc.convert(ts2, None, None)
+        if not val1 < val2:
+            raise AssertionError(f"{val1} is not less than {val2}.")
+
+    def test_convert_nested(self, dtc):
+        inner = [Timestamp("2017-01-01"), Timestamp("2017-01-02")]
+        data = [inner, inner]
+        result = dtc.convert(data, None, None)
+        expected = [dtc.convert(x, None, None) for x in data]
+        assert (np.array(result) == expected).all()
+
+
+class TestPeriodConverter:
+    @pytest.fixture
+    def pc(self):
+        return converter.PeriodConverter()
+
+    @pytest.fixture
+    def axis(self):
+        class Axis:
+            pass
+
+        axis = Axis()
+        axis.freq = "D"
+        return axis
+
+    def test_convert_accepts_unicode(self, pc, axis):
+        r1 = pc.convert("2012-1-1", None, axis)
+        r2 = pc.convert("2012-1-1", None, axis)
+        assert r1 == r2
+
+    def test_conversion(self, pc, axis):
+        rs = pc.convert(["2012-1-1"], None, axis)[0]
+        xp = Period("2012-1-1").ordinal
+        assert rs == xp
+
+        rs = pc.convert("2012-1-1", None, axis)
+        assert rs == xp
+
+        rs = pc.convert([date(2012, 1, 1)], None, axis)[0]
+        assert rs == xp
+
+        rs = pc.convert(date(2012, 1, 1), None, axis)
+        assert rs == xp
+
+        rs = pc.convert([Timestamp("2012-1-1")], None, axis)[0]
+        assert rs == xp
+
+        rs = pc.convert(Timestamp("2012-1-1"), None, axis)
+        assert rs == xp
+
+        rs = pc.convert("2012-01-01", None, axis)
+        assert rs == xp
+
+        rs = pc.convert("2012-01-01 00:00:00+0000", None, axis)
+        assert rs == xp
+
+        rs = pc.convert(
+            np.array(
+                ["2012-01-01 00:00:00", "2012-01-02 00:00:00"],
+                dtype="datetime64[ns]",
+            ),
+            None,
+            axis,
+        )
+        assert rs[0] == xp
+
+    def test_integer_passthrough(self, pc, axis):
+        # GH9012
+        rs = pc.convert([0, 1], None, axis)
+        xp = [0, 1]
+        assert rs == xp
+
+    def test_convert_nested(self, pc, axis):
+        data = ["2012-1-1", "2012-1-2"]
+        r1 = pc.convert([data, data], None, axis)
+        r2 = [pc.convert(data, None, axis) for _ in range(2)]
+        assert r1 == r2
+
+
+class TestTimeDeltaConverter:
+    """Test timedelta converter"""
+
+    @pytest.mark.parametrize(
+        "x, decimal, format_expected",
+        [
+            (0.0, 0, "00:00:00"),
+            (3972320000000, 1, "01:06:12.3"),
+            (713233432000000, 2, "8 days 06:07:13.43"),
+            (32423432000000, 4, "09:00:23.4320"),
+        ],
+    )
+    def test_format_timedelta_ticks(self, x, decimal, format_expected):
+        tdc = converter.TimeSeries_TimedeltaFormatter
+        result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
+        assert result == format_expected
+
+    @pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)])
+    def test_call_w_different_view_intervals(self, view_interval, monkeypatch):
+        # previously broke on reversed xlims; see GH37454
+        class mock_axis:
+            def get_view_interval(self):
+                return view_interval
+
+        tdc = converter.TimeSeries_TimedeltaFormatter()
+        monkeypatch.setattr(tdc, "axis", mock_axis())
+        tdc(0.0, 0)
+
+
+@pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500])
+# The range is limited to 11.25 at the bottom by if statements in
+# the _quarterly_finder() function
+def test_quarterly_finder(year_span):
+    vmin = -1000
+    vmax = vmin + year_span * 4
+    span = vmax - vmin + 1
+    if span < 45:
+        pytest.skip("the quarterly finder is only invoked if the span is >= 45")
+    nyears = span / 4
+    (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears)
+    result = converter._quarterly_finder(vmin, vmax, to_offset("QE"))
+    quarters = PeriodIndex(
+        arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]")
+    )
+    majors = np.array([x[1] for x 
in result]) + minors = np.array([x[2] for x in result]) + major_quarters = quarters[majors] + minor_quarters = quarters[minors] + check_major_years = major_quarters.year % maj_anndef == 0 + check_minor_years = minor_quarters.year % min_anndef == 0 + check_major_quarters = major_quarters.quarter == 1 + check_minor_quarters = minor_quarters.quarter == 1 + assert np.all(check_major_years) + assert np.all(check_minor_years) + assert np.all(check_major_quarters) + assert np.all(check_minor_quarters) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..112172656b6ecde6a94282f58a8085751085fc44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py @@ -0,0 +1,1754 @@ +""" Test cases for time series specific (freq conversion, etc) """ +from datetime import ( + date, + datetime, + time, + timedelta, +) +import pickle + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + BaseOffset, + to_offset, +) +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr + +from pandas import ( + DataFrame, + Index, + NaT, + Series, + concat, + isna, + to_datetime, +) +import pandas._testing as tm +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + bdate_range, + date_range, +) +from pandas.core.indexes.period import ( + Period, + PeriodIndex, + period_range, +) +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.tests.plotting.common import _check_ticks_props + +from pandas.tseries.offsets import WeekOfMonth + +mpl = pytest.importorskip("matplotlib") + + +class TestTSPlot: + @pytest.mark.filterwarnings("ignore::UserWarning") + def test_ts_plot_with_tz(self, tz_aware_fixture): + # GH2877, GH17173, GH31205, GH31580 + tz = tz_aware_fixture + index = date_range("1/1/2011", periods=2, freq="h", tz=tz) + ts = Series([188.5, 328.25], index=index) + _check_plot_works(ts.plot) + ax = ts.plot() + xdata = next(iter(ax.get_lines())).get_xdata() + # Check first and last points' labels are correct + assert (xdata[0].hour, xdata[0].minute) == (0, 0) + assert (xdata[-1].hour, xdata[-1].minute) == (1, 0) + + def test_fontsize_set_correctly(self): + # For issue #8765 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), index=range(10) + ) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + for label in ax.get_xticklabels() + ax.get_yticklabels(): + assert label.get_fontsize() == 2 + + def test_frame_inferred(self): + # inferred freq + idx = date_range("1/1/1987", freq="MS", periods=100) + idx = DatetimeIndex(idx.values, freq=None) + + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + # axes freq + idx = idx[0:40].union(idx[45:99]) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df2.plot) + + def test_frame_inferred_n_gt_1(self): + # N > 1 + idx = date_range("2008-1-1 00:15:00", freq="15min", periods=10) + idx = DatetimeIndex(idx.values, freq=None) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + def test_is_error_nozeroindex(self): + # GH11858 + i = np.array([1, 2, 3]) + a = DataFrame(i, index=i) + _check_plot_works(a.plot, xerr=a) + _check_plot_works(a.plot, yerr=a) + + def 
test_nonnumeric_exclude(self): + idx = date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + + fig, ax = mpl.pyplot.subplots() + df.plot(ax=ax) # it works + assert len(ax.get_lines()) == 1 # B was plotted + mpl.pyplot.close(fig) + + def test_nonnumeric_exclude_error(self): + idx = date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df["A"].plot() + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_tsplot_period(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_tsplot_datetime(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + def test_tsplot(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _, ax = mpl.pyplot.subplots() + ts.plot(style="k", ax=ax) + color = (0.0, 0.0, 0.0, 1) + assert color == ax.get_lines()[0].get_color() + + def test_both_style_and_color(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' " + "keyword argument. Please use one or the other or pass 'style' " + "without a color symbol" + ) + with pytest.raises(ValueError, match=msg): + ts.plot(style="b-", color="#000099") + + s = ts.reset_index(drop=True) + with pytest.raises(ValueError, match=msg): + s.plot(style="b-", color="#000099") + + @pytest.mark.parametrize("freq", ["ms", "us"]) + def test_high_freq(self, freq): + _, ax = mpl.pyplot.subplots() + rng = date_range("1/1/2012", periods=100, freq=freq) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _check_plot_works(ser.plot, ax=ax) + + def test_get_datevalue(self): + from pandas.plotting._matplotlib.converter import get_datevalue + + assert get_datevalue(None, "D") is None + assert get_datevalue(1987, "Y") == 1987 + assert get_datevalue(Period(1987, "Y"), "M") == Period("1987-12", "M").ordinal + assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal + + def test_ts_plot_format_coord(self): + def check_format_of_first_point(ax, expected_string): + first_line = ax.get_lines()[0] + first_x = first_line.get_xdata()[0].ordinal + first_y = first_line.get_ydata()[0] + assert expected_string == ax.format_coord(first_x, first_y) + + annual = Series(1, index=date_range("2014-01-01", periods=3, freq="YE-DEC")) + _, ax = mpl.pyplot.subplots() + annual.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014 y = 1.000000") + + # note this is added to the annual plot already in existence, and + # changes its freq field + daily = Series(1, index=date_range("2014-01-01", periods=3, freq="D")) + daily.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000") + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_line_plot_period_series(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = 
Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(ser.plot, ser.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_series(self, frqncy): + # test period index line plot for series with multiples (`mlt`) of the + # frequency (`frqncy`) rule code. tests resolution of issue #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + s = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(s.plot, s.index.freq.rule_code) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_series(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(ser.plot, ser.index.freq.rule_code) + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "ME", "QE", "YE"]) + def test_line_plot_period_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + _check_plot_works(df.plot, df.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_frame(self, frqncy): + # test period index line plot for DataFrames with multiples (`mlt`) + # of the frequency (`frqncy`) rule code. tests resolution of issue + # #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.asfreq(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.to_period(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_inferred_freq(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser = Series(ser.values, Index(np.asarray(ser.index))) + _check_plot_works(ser.plot, ser.index.inferred_freq) + + ser = ser.iloc[[0, 3, 5, 6]] + _check_plot_works(ser.plot) + + def test_fake_inferred_business(self): + _, ax = mpl.pyplot.subplots() + rng = date_range("2001-1-1", "2001-1-10") + ts = Series(range(len(rng)), index=rng) + ts = concat([ts[:3], ts[5:]]) + ts.plot(ax=ax) + assert not hasattr(ax, "freq") + + def test_plot_offset_freq(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _check_plot_works(ser.plot) + + def test_plot_offset_freq_business(self): + dr = date_range("2023-01-01", freq="BQS", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + def 
test_plot_multiple_inferred_freq(self): + dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)]) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_uhf(self): + import pandas.plotting._matplotlib.converter as conv + + idx = date_range("2012-6-22 21:59:51.960928", freq="ms", periods=500) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + axis = ax.get_xaxis() + + tlocs = axis.get_ticklocs() + tlabels = axis.get_ticklabels() + for loc, label in zip(tlocs, tlabels): + xp = conv._from_ordinal(loc).strftime("%H:%M:%S.%f") + rs = str(label.get_text()) + if len(rs): + assert xp == rs + + def test_irreg_hf(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + irreg = df.iloc[[0, 1, 3, 4]] + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all() + + def test_irreg_hf_object(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + _, ax = mpl.pyplot.subplots() + df2.index = df2.index.astype(object) + df2.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - sec) < 1e-8).all() + + def test_irregular_datetime64_repr_bug(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ser = ser.iloc[[0, 1, 2, 7]] + + _, ax = mpl.pyplot.subplots() + + ret = ser.plot(ax=ax) + assert ret is not None + + for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index): + assert rs == xp + + def test_business_freq(self): + bts = Series(range(5), period_range("2020-01-01", periods=5)) + msg = r"PeriodDtype\[B\] is deprecated" + dt = bts.index[0].to_timestamp() + with tm.assert_produces_warning(FutureWarning, match=msg): + bts.index = period_range(start=dt, periods=len(bts), freq="B") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + with tm.assert_produces_warning(FutureWarning, match=msg): + assert PeriodIndex(data=idx).freqstr == "B" + + def test_business_freq_convert(self): + bts = Series( + np.arange(300, dtype=np.float64), + index=date_range("2020-01-01", periods=300, freq="B"), + ).asfreq("BME") + ts = bts.to_period("M") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + assert PeriodIndex(data=idx).freqstr == "M" + + def test_freq_with_no_period_alias(self): + # GH34487 + freq = WeekOfMonth() + bts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ).asfreq(freq) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + + idx = ax.get_lines()[0].get_xdata() + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(data=idx) + + def test_nonzero_base(self): + # GH2571 + idx = date_range("2012-12-20", periods=24, freq="h") + timedelta(minutes=30) + df = DataFrame(np.arange(24), index=idx) + _, ax = 
mpl.pyplot.subplots() + df.plot(ax=ax) + rs = ax.get_lines()[0].get_xdata() + assert not Index(rs).is_normalized + + def test_dataframe(self): + bts = DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + } + ) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + idx = ax.get_lines()[0].get_xdata() + tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx)) + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + @pytest.mark.parametrize( + "obj", + [ + Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + "b": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + + 1, + } + ), + ], + ) + def test_axis_limits(self, obj): + _, ax = mpl.pyplot.subplots() + obj.plot(ax=ax) + xlim = ax.get_xlim() + ax.set_xlim(xlim[0] - 5, xlim[1] + 10) + result = ax.get_xlim() + assert result[0] == xlim[0] - 5 + assert result[1] == xlim[1] + 10 + + # string + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim("1/1/2000", "4/1/2000") + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + + # datetime + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1)) + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + fig = ax.get_figure() + mpl.pyplot.close(fig) + + def test_get_finder(self): + import pandas.plotting._matplotlib.converter as conv + + assert conv.get_finder(to_offset("B")) == conv._daily_finder + assert conv.get_finder(to_offset("D")) == conv._daily_finder + assert conv.get_finder(to_offset("ME")) == conv._monthly_finder + assert conv.get_finder(to_offset("QE")) == conv._quarterly_finder + assert conv.get_finder(to_offset("YE")) == conv._annual_finder + assert conv.get_finder(to_offset("W")) == conv._daily_finder + + def test_finder_daily(self): + day_lst = [10, 40, 252, 400, 950, 2750, 10000] + + msg = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst) + rs1 = [] + rs2 = [] + for n in day_lst: + rng = bdate_range("1999-1-1", periods=n) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_quarterly(self): + yrs = [3.5, 11] + + xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in yrs: + rng = period_range("1987Q2", periods=int(n * 4), freq="Q") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + (vmin, vmax) = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly(self): + yrs = [1.15, 2.5, 4, 11] + + 
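# [editorial note] as in the finder tests above, each span is checked
+        # twice: once over the full x-range and once after nudging vmin by
+        # +0.9, asserting that the first major tick stays at the same period
+        # ordinal (here Jan 1988) despite the sub-period view offset.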
xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in yrs: + rng = period_range("1987Q2", periods=int(n * 12), freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly_long(self): + rng = period_range("1988Q1", periods=24 * 12, freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1989Q1", "M").ordinal + assert rs == xp + + def test_finder_annual(self): + xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] + xp = [Period(x, freq="Y").ordinal for x in xp] + rs = [] + for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]: + rng = period_range("1987", periods=nyears, freq="Y") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs == xp + + @pytest.mark.slow + def test_finder_minutely(self): + nminutes = 50 * 24 * 60 + rng = date_range("1/1/1999", freq="Min", periods=nminutes) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="Min").ordinal + + assert rs == xp + + def test_finder_hourly(self): + nhours = 23 + rng = date_range("1/1/1999", freq="h", periods=nhours) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="h").ordinal + + assert rs == xp + + def test_gaps(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_irregular(self): + # irregular + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts = ts.iloc[[0, 1, 2, 5, 7, 9, 12, 15, 20]] + ts.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[2:5, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_non_ts(self): + # non-ts + idx = [0, 1, 2, 5, 7, 9, 12, 15, 20] + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, 
mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[2:5, 1].all() + + def test_gap_upsample(self): + low = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + low.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + + idxh = date_range(low.index[0], low.index[-1], freq="12h") + s = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + s.plot(secondary_y=True) + lines = ax.get_lines() + assert len(lines) == 1 + assert len(ax.right_ax.get_lines()) == 1 + + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + + def test_secondary_y(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()) + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_yaxis(self): + Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_both(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + ax = ser2.plot() + ax2 = ser.plot(secondary_y=True) + assert ax.get_yaxis().get_visible() + assert not hasattr(ax, "left_ax") + assert hasattr(ax, "right_ax") + assert hasattr(ax2, "left_ax") + assert not hasattr(ax2, "right_ax") + + def test_secondary_y_ts(self): + idx = date_range("1/1/2000", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(10), idx) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp() + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_ts_yaxis(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_y_ts_visible(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + ax = ser2.plot() + assert ax.get_yaxis().get_visible() + + def test_secondary_kde(self): + pytest.importorskip("scipy") + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True, kind="density", ax=ax) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + assert axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = 
mpl.pyplot.subplots() + ser.plot(secondary_y=True, kind="bar", ax=ax) + axes = fig.get_axes() + assert axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_mixed_freq_regular_first(self): + # TODO + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + + # it works! + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + + tm.assert_index_equal(idx1, s1.index.to_period("B")) + tm.assert_index_equal(idx2, s2.index.to_period("B")) + + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first(self): + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_regular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + assert idx1.equals(s1.index.to_period("B")) + assert idx2.equals(s2.index.to_period("B")) + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_hf_first(self): + idxh = 
date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + + def test_mixed_freq_alignment(self): + ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="h") + ts_data = np.random.default_rng(2).standard_normal(12) + + ts = Series(ts_data, index=ts_ind) + ts2 = ts.asfreq("min").interpolate() + + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + ts2.plot(style="r", ax=ax) + + assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0] + + def test_mixed_freq_lf_first(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(legend=True, ax=ax) + high.plot(legend=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + leg = ax.get_legend() + assert len(leg.texts) == 2 + mpl.pyplot.close(ax.get_figure()) + + def test_mixed_freq_lf_first_hourly(self): + idxh = date_range("1/1/1999", periods=240, freq="min") + idxl = date_range("1/1/1999", periods=4, freq="h") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "min" + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_mixed_freq_irreg_period(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + irreg = ts.iloc[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]] + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + rng = period_range("1/3/2000", periods=30, freq="B") + ps = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + ps.plot(ax=ax) + + def test_mixed_freq_shared_ax(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + + _, (ax1, ax2) = mpl.pyplot.subplots(nrows=2, sharex=True) + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.freq == "M" + assert ax2.freq == "M" + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_mixed_freq_shared_ax_twin_x(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + # using twinx + _, ax1 = mpl.pyplot.subplots() + ax2 = ax1.twinx() + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + @pytest.mark.xfail(reason="TODO (GH14330, GH14322)") + def test_mixed_freq_shared_ax_twin_x_irregular_first(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="M") + idx2 = 
idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + _, ax1 = mpl.pyplot.subplots() + ax2 = ax1.twinx() + s2.plot(ax=ax1) + s1.plot(ax=ax2) + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_nat_handling(self): + _, ax = mpl.pyplot.subplots() + + dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"]) + s = Series(range(len(dti)), dti) + s.plot(ax=ax) + xdata = ax.get_lines()[0].get_xdata() + # plot x data is bounded by index values + assert s.index.min() <= Series(xdata).min() + assert Series(xdata).max() <= s.index.max() + + def test_to_weekly_resampling_disallow_how_kwd(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + + msg = ( + "'how' is not a valid keyword for plotting functions. If plotting " + "multiple objects on shared axes, resample manually first." + ) + with pytest.raises(ValueError, match=msg): + low.plot(ax=ax, how="foo") + + def test_to_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + + def test_from_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + + expected_h = idxh.to_period().asi8.astype(np.float64) + expected_l = np.array( + [1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562], + dtype=np.float64, + ) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + xdata = line.get_xdata(orig=False) + if len(xdata) == 12: # idxl lines + tm.assert_numpy_array_equal(xdata, expected_l) + else: + tm.assert_numpy_array_equal(xdata, expected_h) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + + _, ax = mpl.pyplot.subplots() + low.plot(kind=kind1, stacked=True, ax=ax) + high.plot(kind=kind2, stacked=True, ax=ax) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + # check stacked values are correct + expected_y += 
low[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check high dataframe result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[3 + i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed_high_to_low(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + _, ax = mpl.pyplot.subplots() + high.plot(kind=kind1, stacked=True, ax=ax) + low.plot(kind=kind2, stacked=True, ax=ax) + + # check high dataframe result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + lines = ax.lines[3 + i] + assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x) + expected_y += low[i].values + tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y) + + def test_mixed_freq_second_millisecond(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # high to low + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_mixed_freq_second_millisecond_low_to_high(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # low to high + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_irreg_dtypes(self): + # date + idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)] + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + Index(idx, dtype=object), + ) + _check_plot_works(df.plot) + + def test_irreg_dtypes_dt64(self): + # np.datetime64 + idx = date_range("1/1/2000", periods=10) + idx = idx[[0, 
2, 5, 9]].astype(object) + df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(df.plot, ax=ax) + + def test_time(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_change_xlim(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + # change xlim + ax.set_xlim("1:30", "5:00") + + # check tick labels again + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_musec(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + + us = round((_tick - int(_tick)) * 1e6) + + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if (us % 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f") + elif (us // 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f")[:-3] + elif s != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S") + else: + xp = time(h, m, s, us).strftime("%H:%M") + assert xp == rs + + def test_secondary_upsample(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + ax = high.plot(secondary_y=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" + assert hasattr(ax, "left_ax") 
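+ # ``plot(secondary_y=True)`` hands back the secondary axes, which keeps a + # ``left_ax`` reference to the primary axes; only the primary axes gets a + # ``right_ax`` reference, which is what this pair of asserts checks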
+ assert not hasattr(ax, "right_ax") + for line in ax.left_ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" + + def test_secondary_legend(self): + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + + # ts + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B (right)" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_multi_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_nonts(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert 
len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close() + + def test_secondary_legend_nonts_multi_col(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_format_date_axis(self): + rng = date_range("1/1/2012", periods=12, freq="ME") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + xaxis = ax.get_xaxis() + for line in xaxis.get_ticklabels(): + if len(line.get_text()) > 0: + assert line.get_rotation() == 30 + + def test_ax_plot(self): + x = date_range(start="2012-01-02", periods=10, freq="D") + y = list(range(len(x))) + _, ax = mpl.pyplot.subplots() + lines = ax.plot(x, y, label="Y") + tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x) + + def test_mpl_nopandas(self): + dates = [date(2008, 12, 31), date(2009, 1, 31)] + values1 = np.arange(10.0, 11.0, 0.5) + values2 = np.arange(11.0, 12.0, 0.5) + + kw = {"fmt": "-", "lw": 4} + + _, ax = mpl.pyplot.subplots() + ax.plot_date([x.toordinal() for x in dates], values1, **kw) + ax.plot_date([x.toordinal() for x in dates], values2, **kw) + + line1, line2 = ax.get_lines() + + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp) + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp) + + def test_irregular_ts_shared_ax_xlim(self): + # GH 2960 + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + # plot the left section of the irregular series, then the right section + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + ts_irregular[5:].plot(ax=ax) + + # check that axis limits are correct + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_secondary_y_non_ts_xlim(self): + # GH 3490 - non-timeseries with secondary y + index_1 = [1, 2, 3, 4] + index_2 = [5, 6, 7, 8] + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_regular_ts_xlim(self): + # GH 3490 - regular-timeseries with secondary y + index_1 = date_range(start="2000-01-01", periods=4, freq="D") + index_2 = date_range(start="2000-01-05", periods=4, freq="D") + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = 
mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_mixed_freq_ts_xlim(self): + # GH 3490 - mixed frequency timeseries with secondary y + rng = date_range("2000-01-01", periods=10000, freq="min") + ts = Series(1, index=rng) + + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + left_before, right_before = ax.get_xlim() + ts.resample("D").mean().plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + # a downsample should not have changed either limit + assert left_before == left_after + assert right_before == right_after + + def test_secondary_y_irregular_ts_xlim(self): + # GH 3490 - irregular-timeseries with secondary y + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + # plot higher-x values on secondary axis + ts_irregular[5:].plot(secondary_y=True, ax=ax) + # ensure secondary limits aren't overwritten by plot on primary + ts_irregular[:5].plot(ax=ax) + + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_plot_outofbounds_datetime(self): + # 2579 - checking this does not raise + values = [date(1677, 1, 1), date(1677, 1, 2)] + _, ax = mpl.pyplot.subplots() + ax.plot(values) + + values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] + ax.plot(values) + + def test_format_timedelta_ticks_narrow(self): + expected_labels = [f"00:00:00.0000000{i:0>2d}" for i in np.arange(10)] + + rng = timedelta_range("0", periods=10, freq="ns") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_format_timedelta_ticks_wide(self): + expected_labels = [ + "00:00:00", + "1 days 03:46:40", + "2 days 07:33:20", + "3 days 11:20:00", + "4 days 15:06:40", + "5 days 18:53:20", + "6 days 22:40:00", + "8 days 02:26:40", + "9 days 06:13:20", + ] + + rng = timedelta_range("0", periods=10, freq="1 d") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_timedelta_plot(self): + # test issue #8711 + s = Series(range(5), timedelta_range("1day", periods=5)) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_long_period(self): + # test long period + index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_short_period(self): + # test short period + index = timedelta_range("1 day 2 hr 30 min 10 s", 
periods=10, freq="1 ns") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_hist(self): + # https://github.com/matplotlib/matplotlib/issues/8459 + rng = date_range("1/1/2011", periods=10, freq="h") + x = rng + w1 = np.arange(0, 1, 0.1) + w2 = np.arange(0, 1, 0.1)[::-1] + _, ax = mpl.pyplot.subplots() + ax.hist([x, x], weights=[w1, w2]) + + def test_overlapping_datetime(self): + # GH 6608 + s1 = Series( + [1, 2, 3], + index=[ + datetime(1995, 12, 31), + datetime(2000, 12, 31), + datetime(2005, 12, 31), + ], + ) + s2 = Series( + [1, 2, 3], + index=[ + datetime(1997, 12, 31), + datetime(2003, 12, 31), + datetime(2008, 12, 31), + ], + ) + + # plot first series, then add the second series to those axes, + # then try adding the first series again + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + s2.plot(ax=ax) + s1.plot(ax=ax) + + @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") + def test_add_matplotlib_datetime64(self): + # GH9053 - ensure that a plot with PeriodConverter still understands + # datetime64 data. This still fails because matplotlib overrides the + # ax.xaxis.converter with a DatetimeConverter + s = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1970-01-02", periods=10), + ) + ax = s.plot() + with tm.assert_produces_warning(DeprecationWarning): + # multi-dimensional indexing + ax.plot(s.index, s.values, color="g") + l1, l2 = ax.lines + tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata()) + + def test_matplotlib_scatter_datetime64(self): + # https://github.com/matplotlib/matplotlib/issues/11391 + df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=["x", "y"]) + df["time"] = date_range("2018-01-01", periods=10, freq="D") + _, ax = mpl.pyplot.subplots() + ax.scatter(x="time", y="y", data=df) + mpl.pyplot.draw() + label = ax.get_xticklabels()[0] + expected = "2018-01-01" + assert label.get_text() == expected + + def test_check_xticks_rot(self): + # https://github.com/pandas-dev/pandas/issues/29460 + # regular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_irregular(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=30) + + def test_check_xticks_rot_use_idx(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # use timeseries index or not + axes = df.set_index("x").plot(y="y", use_index=True) + _check_ticks_props(axes, xrot=30) + axes = df.set_index("x").plot(y="y", use_index=False) + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_sharex(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # separate subplots + axes = df.plot(x="x", y="y", subplots=True, sharex=True) + _check_ticks_props(axes, xrot=30) + axes = df.plot(x="x", y="y", subplots=True, sharex=False) + _check_ticks_props(axes, xrot=0) + + +def _check_plot_works(f, freq=None, series=None, *args, **kwargs): + import matplotlib.pyplot as plt + + fig = plt.gcf() + + try: + plt.clf() + ax = fig.add_subplot(211) + orig_ax = kwargs.pop("ax", plt.gca()) + 
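# remember any freq already attached to the axes we are drawing into; the + # freq assertions below are only applied when ``f`` itself set the freq + 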
orig_axfreq = getattr(orig_ax, "freq", None) + + ret = f(*args, **kwargs) + assert ret is not None # do something more intelligent + + ax = kwargs.pop("ax", plt.gca()) + if series is not None: + dfreq = series.index.freq + if isinstance(dfreq, BaseOffset): + dfreq = dfreq.rule_code + if orig_axfreq is None: + assert ax.freq == dfreq + + if freq is not None: + ax_freq = to_offset(ax.freq, is_period=True) + if freq is not None and orig_axfreq is None: + assert ax_freq == freq + + ax = fig.add_subplot(212) + kwargs["ax"] = ax + ret = f(*args, **kwargs) + assert ret is not None # TODO: do something more intelligent + + # GH18439, GH#24088, statsmodels#4772 + with tm.ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) + finally: + plt.close(fig) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..5ebf93510a61549c838d91ab2e703f9db23fd626 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py @@ -0,0 +1,155 @@ +""" Test cases for GroupBy.plot """ + + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_legend_labels, +) + +pytest.importorskip("matplotlib") + + +class TestDataFrameGroupByPlots: + def test_series_groupby_plotting_nominally_works(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + + weight.groupby(gender).plot() + + def test_series_groupby_plotting_nominally_works_hist(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + height.groupby(gender).hist() + + def test_series_groupby_plotting_nominally_works_alpha(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + # Regression test for GH8733 + height.groupby(gender).plot(alpha=0.5) + + def test_plotting_with_float_index_works(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + + df.groupby("def")["val"].plot() + + def test_plotting_with_float_index_works_apply(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + df.groupby("def")["val"].apply(lambda x: x.plot()) + + def test_hist_single_row(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_hist_single_row_single_bycol(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_plot_submethod_works(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + df.groupby("z").plot.scatter("x", "y") + + def test_plot_submethod_works_line(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + 
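# the ``plot.<kind>`` sub-methods are available on SeriesGroupBy as well + 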
df.groupby("z")["x"].plot.line() + + def test_plot_kwargs(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + + res = df.groupby("z").plot(kind="scatter", x="x", y="y") + # check that a scatter plot is effectively plotted: the axes should + # contain a PathCollection from the scatter plot (GH11805) + assert len(res["a"].collections) == 1 + + def test_plot_kwargs_scatter(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + res = df.groupby("z").plot.scatter(x="x", y="y") + assert len(res["a"].collections) == 1 + + @pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)]) + def test_groupby_hist_frame_with_legend(self, column, expected_axes_num): + # GH 6279 - DataFrameGroupBy histogram can have a legend + expected_layout = (1, expected_axes_num) + expected_labels = column or [["a"], ["b"]] + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for axes in g.hist(legend=True, column=column): + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + for ax, expected_label in zip(axes[0], expected_labels): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("column", [None, "b"]) + def test_groupby_hist_frame_with_legend_raises(self, column): + # GH 6279 - DataFrameGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, column=column, label="d") + + def test_groupby_hist_series_with_legend(self): + # GH 6279 - SeriesGroupBy histogram can have a legend + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for ax in g["a"].hist(legend=True): + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + _check_legend_labels(ax, ["1", "2"]) + + def test_groupby_hist_series_with_legend_raises(self): + # GH 6279 - SeriesGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, label="d") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py new file mode 100644 index 0000000000000000000000000000000000000000..4d17f87fdc7bc1456a118c84b76c631544572fd4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py @@ -0,0 +1,971 @@ +""" Test cases for .hist method """ +import re + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_legend_labels, + _check_patches_all_filled, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + get_x_axis, + get_y_axis, +) + +mpl = 
pytest.importorskip("matplotlib") + + + @pytest.fixture + def ts(): + return Series( + np.arange(30, dtype=np.float64), + index=date_range("2020-01-01", periods=30, freq="B"), + name="ts", + ) + + + class TestSeriesPlots: + @pytest.mark.parametrize("kwargs", [{}, {"grid": False}, {"figsize": (8, 10)}]) + def test_hist_legacy_kwargs(self, ts, kwargs): + _check_plot_works(ts.hist, **kwargs) + + @pytest.mark.parametrize("kwargs", [{}, {"bins": 5}]) + def test_hist_legacy_kwargs_warning(self, ts, kwargs): + # _check_plot_works adds an ax so catch warning. see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(ts.hist, by=ts.index.month, **kwargs) + + def test_hist_legacy_ax(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, default_axes=True) + + def test_hist_legacy_ax_and_fig(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, figure=fig, default_axes=True) + + def test_hist_legacy_fig(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, figure=fig, default_axes=True) + + def test_hist_legacy_multi_ax(self, ts): + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2) + _check_plot_works(ts.hist, figure=fig, ax=ax1, default_axes=True) + _check_plot_works(ts.hist, figure=fig, ax=ax2, default_axes=True) + + def test_hist_legacy_by_fig_error(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + msg = ( + "Cannot pass 'figure' when using the 'by' argument, since a new 'Figure' " + "instance will be created" + ) + with pytest.raises(ValueError, match=msg): + ts.hist(by=ts.index, figure=fig) + + def test_hist_bins_legacy(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + ax = df.hist(bins=2)[0][0] + assert len(ax.patches) == 2 + + def test_hist_layout(self, hist_df): + df = hist_df + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=(1, 1)) + + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=[1, 1]) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, layout, axes_num, res_layout", + [ + ["gender", (2, 1), 2, (2, 1)], + ["gender", (3, -1), 2, (3, 1)], + ["category", (4, 1), 4, (4, 1)], + ["category", (2, -1), 4, (2, 2)], + ["category", (3, -1), 4, (3, 2)], + ["category", (-1, 4), 4, (1, 4)], + ["classroom", (2, 2), 3, (2, 2)], + ], + ) + def test_hist_layout_with_by(self, hist_df, by, layout, axes_num, res_layout): + df = hist_df + + # _check_plot_works adds an `ax` kwarg to the method call + # so we get a warning about an axis being cleared, even + # though we don't explicitly pass one, see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.height.hist, by=getattr(df, by), layout=layout) + _check_axes_shape(axes, axes_num=axes_num, layout=res_layout) + + def test_hist_layout_with_by_shape(self, hist_df): + df = hist_df + + axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) + _check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) + + def test_hist_no_overlap(self): + from matplotlib.pyplot import ( + gcf, + subplot, + ) + + x = Series(np.random.default_rng(2).standard_normal(2)) + y = Series(np.random.default_rng(2).standard_normal(2)) + subplot(121) + x.hist() + subplot(122) + y.hist() + fig = gcf() + axes = fig.axes + assert len(axes) == 2 + + def test_hist_by_no_extra_plots(self, hist_df): + df = hist_df + df.height.hist(by=df.gender) + assert 
len(mpl.pyplot.get_fignums()) == 1 + + def test_plot_fails_when_ax_differs_from_figure(self, ts): + from pylab import figure + + fig1 = figure() + fig2 = figure() + ax1 = fig1.add_subplot(111) + msg = "passed axis not bound to passed figure" + with pytest.raises(AssertionError, match=msg): + ts.hist(ax=ax1, figure=fig2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + ser = Series(np.random.default_rng(2).integers(1, 10)) + ax = ser.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize( + "by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))] + ) + def test_hist_with_legend(self, by, expected_axes_num, expected_layout): + # GH 6279 - Series histogram can have a legend + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + # Use default_axes=True when the plotting method generates subplots itself + axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by) + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + _check_legend_labels(axes, "a") + + @pytest.mark.parametrize("by", [None, "b"]) + def test_hist_with_legend_raises(self, by): + # GH 6279 - Series histogram with legend and label raises + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + s.hist(legend=True, by=by, label="c") + + def test_hist_kwargs(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 5 + _check_text_labels(ax.yaxis.get_label(), "Frequency") + + def test_hist_kwargs_horizontal(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(orientation="horizontal", ax=ax) + _check_text_labels(ax.xaxis.get_label(), "Frequency") + + def test_hist_kwargs_align(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(align="left", stacked=True, ax=ax) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + # ticks are values, thus ticklabels are blank + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_plot_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.kde) + + def test_hist_kde_density_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.density) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde_logy(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_color_bins(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, bins=10, 
color="b", ax=ax) + _check_ax_scales(ax, yaxis="log") + assert len(ax.patches) == 10 + _check_colors(ax.patches, facecolors=["b"] * 10) + + def test_hist_kde_color(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, color="r", ax=ax) + _check_ax_scales(ax, yaxis="log") + lines = ax.get_lines() + assert len(lines) == 1 + _check_colors(lines, ["r"]) + + +class TestDataFramePlots: + @pytest.mark.slow + def test_hist_df_legacy(self, hist_df): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(hist_df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, grid=False) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + assert not axes[1, 1].get_visible() + + _check_plot_works(df[[2]].hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout2(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1))) + _check_plot_works(df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout3(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, layout=(4, 2)) + _check_axes_shape(axes, axes_num=6, layout=(4, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", [{"sharex": True, "sharey": True}, {"figsize": (8, 10)}, {"bins": 5}] + ) + def test_hist_df_legacy_layout_kwargs(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # make sure sharex, sharey is handled + # handle figsize arg + # check bins argument + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.hist, **kwargs) + + @pytest.mark.slow + def test_hist_df_legacy_layout_labelsize_rot(self, frame_or_series): + # make sure xlabelsize and xrot are handled + obj = frame_or_series(range(10)) + xf, yf = 20, 18 + xrot, yrot = 30, 40 + axes = obj.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + @pytest.mark.slow + def test_hist_df_legacy_rectangles(self): + from matplotlib.patches import Rectangle + + ser = Series(range(10)) + ax = ser.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + @pytest.mark.slow + def test_hist_df_legacy_scale(self): + ser = Series(range(10)) + ax = ser.hist(log=True) + # scale of y must be 'log' + _check_ax_scales(ax, yaxis="log") + + @pytest.mark.slow + def test_hist_df_legacy_external_error(self): + ser = Series(range(10)) + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + ser.hist(foo="bar") + + def 
test_hist_non_numerical_or_datetime_raises(self): + # gh-10444, GH32590 + df = DataFrame( + { + "a": np.random.default_rng(2).random(10), + "b": np.random.default_rng(2).integers(0, 10, 10), + "c": to_datetime( + np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ) + ), + "d": to_datetime( + np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ), + utc=True, + ), + } + ) + df_o = df.astype(object) + + msg = "hist method requires numerical or datetime columns, nothing to plot." + with pytest.raises(ValueError, match=msg): + df_o.hist() + + @pytest.mark.parametrize( + "layout_test", + ( + {"layout": None, "expected_size": (2, 2)}, # default is 2x2 + {"layout": (2, 2), "expected_size": (2, 2)}, + {"layout": (4, 1), "expected_size": (4, 1)}, + {"layout": (1, 4), "expected_size": (1, 4)}, + {"layout": (3, 3), "expected_size": (3, 3)}, + {"layout": (-1, 4), "expected_size": (1, 4)}, + {"layout": (4, -1), "expected_size": (4, 1)}, + {"layout": (-1, 2), "expected_size": (2, 2)}, + {"layout": (2, -1), "expected_size": (2, 2)}, + ), + ) + def test_hist_layout(self, layout_test): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + axes = df.hist(layout=layout_test["layout"]) + expected = layout_test["expected_size"] + _check_axes_shape(axes, axes_num=3, layout=expected) + + def test_hist_layout_error(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # layout too small for all 3 plots + msg = "Layout of 1x1 must be larger than required size 3" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1, 1)) + + # invalid format for layout + msg = re.escape("Layout must be a tuple of (rows, columns)") + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1,)) + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(-1, -1)) + + # GH 9351 + def test_tight_layout(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=100, + dtype=np.int64, + ) + ) + # Use default_axes=True when the plotting method generates subplots itself + _check_plot_works(df.hist, default_axes=True) + mpl.pyplot.tight_layout() + + def test_hist_subplot_xrot(self): + # GH 30288 + df = DataFrame( + { + "length": [1.5, 0.5, 1.2, 0.9, 3], + "animal": ["pig", "rabbit", "pig", "pig", "rabbit"], + } + ) + # Use default_axes=True when the plotting method generates subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column="length", + by="animal", + bins=5, + xrot=0, + ) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize( + "column, expected", + [ + (None, ["width", "length", "height"]), + (["length", "width", "height"], ["length", "width", "height"]), + ], + ) + def test_hist_column_order_unchanged(self, column, expected): + # GH29235 + + df = DataFrame( + { + "width": [0.7, 0.2, 0.15, 0.2, 1.1], + "length": [1.5, 0.5, 1.2, 0.9, 3], + "height": [3, 0.5, 3.4, 2, 1], + }, + index=["pig", "rabbit", "duck", "chicken", "horse"], + ) + + # Use default_axes=True when the plotting method generates 
subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column=column, + layout=(1, 3), + ) + result = [axes[0, i].get_title() for i in range(3)] + assert result == expected + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(100, 2)), columns=["a", "b"] + ) + ax = df.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend(self, by, column): + # GH 6279 - DataFrame histogram can have a legend + expected_axes_num = 1 if by is None and column is not None else 2 + expected_layout = (1, expected_axes_num) + expected_labels = column or ["a", "b"] + if by is not None: + expected_labels = [expected_labels] * 2 + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + # Use default_axes=True when the plotting method generates subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + legend=True, + by=by, + column=column, + ) + + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + if by is None and column is None: + axes = axes[0] + for expected_label, ax in zip(expected_labels, axes): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend_raises(self, by, column): + # GH 6279 - DataFrame histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + df.hist(legend=True, by=by, column=column, label="d") + + def test_hist_df_kwargs(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 10 + + def test_hist_df_with_nonnumerics(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 20 + + def test_hist_df_with_nonnumerics_no_bins(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(ax=ax) # bins=10 + assert len(ax.patches) == 40 + + def test_hist_secondary_legend(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + + # primary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, ax=ax) + df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_hist_secondary_secondary(self): + # GH 9610 + df = DataFrame( 
np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible, right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"]) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_secondary_primary(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> primary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + # right axes is returned + df["b"].plot.hist(ax=ax, legend=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b"]) + assert ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_with_nans_and_weights(self): + # GH 48884 + mpl_patches = pytest.importorskip("matplotlib.patches") + df = DataFrame( + [[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]], + columns=list("abc"), + ) + weights = np.array([0.25, 0.3, 0.45]) + no_nan_df = DataFrame([[0.4, 0.2, 0.3], [0.7, 0.8, 0.9]], columns=list("abc")) + no_nan_weights = np.array([[0.3, 0.25, 0.25], [0.45, 0.45, 0.45]]) + + _, ax0 = mpl.pyplot.subplots() + df.plot.hist(ax=ax0, weights=weights) + rects = [x for x in ax0.get_children() if isinstance(x, mpl_patches.Rectangle)] + heights = [rect.get_height() for rect in rects] + _, ax1 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax1, weights=no_nan_weights) + no_nan_rects = [ + x for x in ax1.get_children() if isinstance(x, mpl_patches.Rectangle) + ] + no_nan_heights = [rect.get_height() for rect in no_nan_rects] + assert all(h0 == h1 for h0, h1 in zip(heights, no_nan_heights)) + + idxerror_weights = np.array([[0.3, 0.25], [0.45, 0.45]]) + + msg = "weights must have the same shape as data, or be a single column" + with pytest.raises(ValueError, match=msg): + _, ax2 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax2, weights=idxerror_weights) + + +class TestDataFrameGroupByPlots: + def test_grouped_hist_legacy(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + + axes = _grouped_hist(df.A, by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_axes_shape_no_col(self): + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = df.hist(by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_single_key(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # group by a key with a single value + 
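# even a single group should still come back as a (1, 1) grid of axes + 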
axes = df.hist(by="D", rot=30) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + _check_ticks_props(axes, xrot=30) + + def test_grouped_hist_legacy_grouped_hist_kwargs(self): + from matplotlib.patches import Rectangle + + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + # make sure kwargs to hist are handled + xf, yf = 20, 18 + xrot, yrot = 30, 40 + + axes = _grouped_hist( + df.A, + by=df.C, + cumulative=True, + bins=4, + xlabelsize=xf, + xrot=xrot, + ylabelsize=yf, + yrot=yrot, + density=True, + ) + # height of last bin (index 5) must be 1.0 + for ax in axes.ravel(): + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + height = rects[-1].get_height() + tm.assert_almost_equal(height, 1.0) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + def test_grouped_hist_legacy_grouped_hist(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = _grouped_hist(df.A, by=df.C, log=True) + # scale of y must be 'log' + _check_ax_scales(axes, yaxis="log") + + def test_grouped_hist_legacy_external_err(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + _grouped_hist(df.A, by=df.C, foo="bar") + + def test_grouped_hist_legacy_figsize_err(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + msg = "Specify figure size by tuple instead" + with pytest.raises(ValueError, match=msg): + df.hist(by="C", figsize="default") + + def test_grouped_hist_legacy2(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender_int = np.random.default_rng(2).choice([0, 1], size=n) + df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int}) + gb = df_int.groupby("gender") + axes = gb.hist() + assert len(axes) == 2 + assert len(mpl.pyplot.get_fignums()) == 2 + + @pytest.mark.slow + @pytest.mark.parametrize( + "msg, plot_col, by_col, layout", + [ + [ + "Layout of 1x1 must be larger than required size 2", + "weight", + "gender", + (1, 1), + ], + [ + "Layout of 1x3 must be larger than required size 4", + "height", + "category", + (1, 3), + ], + [ + "At least one dimension of layout must be positive", + "height", + "category", + (-1, -1), + ], + ], + ) + def test_grouped_hist_layout_error(self, hist_df, msg, plot_col, by_col, layout): + df = hist_df + with pytest.raises(ValueError, match=msg): + 
df.hist(column=plot_col, by=getattr(df, by_col), layout=layout) + + @pytest.mark.slow + def test_grouped_hist_layout_warning(self, hist_df): + df = hist_df + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + df.hist, column="height", by=df.gender, layout=(2, 1) + ) + _check_axes_shape(axes, axes_num=2, layout=(2, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "layout, check_layout, figsize", + [[(4, 1), (4, 1), None], [(-1, 1), (4, 1), None], [(4, 2), (4, 2), (12, 8)]], + ) + def test_grouped_hist_layout_figsize(self, hist_df, layout, check_layout, figsize): + df = hist_df + axes = df.hist(column="height", by=df.category, layout=layout, figsize=figsize) + _check_axes_shape(axes, axes_num=4, layout=check_layout, figsize=figsize) + + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{}, {"column": "height", "layout": (2, 2)}]) + def test_grouped_hist_layout_by_warning(self, hist_df, kwargs): + df = hist_df + # GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, by="classroom", **kwargs) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, axes_num, layout", + [ + [{"by": "gender", "layout": (3, 5)}, 2, (3, 5)], + [{"column": ["height", "weight", "category"]}, 3, (2, 2)], + ], + ) + def test_grouped_hist_layout_axes(self, hist_df, kwargs, axes_num, layout): + df = hist_df + axes = df.hist(**kwargs) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_hist_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(column=["height", "weight", "category"], ax=axes[0]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_no_cols(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(by="classroom", ax=axes[1]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + msg = "The number of passed axes must be 1, the same as the output plot" + with pytest.raises(ValueError, match=msg): + axes = df.hist(column="height", ax=axes) + + def test_axis_share_x(self, hist_df): + df = hist_df + # GH4089 + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + + def test_axis_share_y(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + + def test_axis_share_xy(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True) + + # share both x and y + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + assert 
get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(10, 2)), columns=["a", "b"] + ) + ax = df.hist(by="a", histtype=histtype) + _check_patches_all_filled(ax, filled=expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb657c2a800fefe2d509ddfb398399af4ce8649 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py @@ -0,0 +1,720 @@ +""" Test cases for misc plot functions """ +import os + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + date_range, + interval_range, + period_range, + plotting, + read_csv, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +@pytest.fixture +def iris(datapath) -> DataFrame: + """ + The iris dataset as a DataFrame. + """ + return read_csv(datapath("io", "data", "csv", "iris.csv")) + + +@td.skip_if_installed("matplotlib") +def test_import_error_message(): + # GH-19810 + df = DataFrame({"A": [1, 2]}) + + with pytest.raises(ImportError, match="matplotlib is required for plotting"): + df.plot() + + +def test_get_accessor_args(): + func = plotting._core.PlotAccessor._get_call_args + + msg = "Called plot accessor for type list, expected Series or DataFrame" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=[], args=[], kwargs={}) + + msg = "should not be called with positional arguments" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=Series(dtype=object), args=["line", None], kwargs={}) + + x, y, kind, kwargs = func( + backend_name="", + data=DataFrame(), + args=["x"], + kwargs={"y": "y", "kind": "bar", "grid": False}, + ) + assert x == "x" + assert y == "y" + assert kind == "bar" + assert kwargs == {"grid": False} + + x, y, kind, kwargs = func( + backend_name="pandas.plotting._matplotlib", + data=Series(dtype=object), + args=[], + kwargs={}, + ) + assert x is None + assert y is None + assert kind == "line" + assert len(kwargs) == 24 + + +@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) +@pytest.mark.parametrize( + "data", [DataFrame(np.arange(15).reshape(5, 3)), Series(range(5))] +) +@pytest.mark.parametrize( + "index", + [ + Index(range(5)), + date_range("2020-01-01", periods=5), + period_range("2020-01-01", periods=5), + ], +) +def test_savefig(kind, data, index): + fig, ax = plt.subplots() + data.index = index + kwargs = {} + if kind in ["hexbin", "scatter", "pie"]: + if isinstance(data, Series): + pytest.skip(f"{kind} not supported with Series") + kwargs = {"x": 0, "y": 1} + data.plot(kind=kind, ax=ax, **kwargs) + fig.savefig(os.devnull) + + +class TestSeriesPlots: + def test_autocorrelation_plot(self): + from pandas.plotting import autocorrelation_plot + + 
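# short daily-indexed float series; autocorrelation_plot accepts a Series or a plain ndarray +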
ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(autocorrelation_plot, series=ser) + _check_plot_works(autocorrelation_plot, series=ser.values) + + ax = autocorrelation_plot(ser, label="Test") + _check_legend_labels(ax, labels=["Test"]) + + @pytest.mark.parametrize("kwargs", [{}, {"lag": 5}]) + def test_lag_plot(self, kwargs): + from pandas.plotting import lag_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(lag_plot, series=ser, **kwargs) + + def test_bootstrap_plot(self): + from pandas.plotting import bootstrap_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(bootstrap_plot, series=ser, size=10) + + +class TestDataFramePlots: + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(2).standard_normal((100, 3))) + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + # GH 5662 + expected = ["-2", "0", "2"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis_smaller(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(11).standard_normal((100, 3))) + df[0] = (df[0] - 2) / 3 + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + expected = ["-1.0", "-0.5", "0.0"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.slow + def test_andrews_curves_no_warning(self, iris): + from pandas.plotting import andrews_curves + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(andrews_curves, frame=df, class_column="Name") + + @pytest.mark.slow + @pytest.mark.parametrize( + "linecolors", + [ + ("#556270", "#4ECDC4", "#C7F464"), + ["dodgerblue", "aquamarine", "seagreen"], + ], + ) + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_linecolors(self, request, df, linecolors): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=linecolors + ) + _check_colors( + ax.get_lines()[:10], linecolors=linecolors, 
mapping=df["Name"][:10] + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_cmap(self, request, df): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=cmaps + ) + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_andrews_curves_handle(self): + from pandas.plotting import andrews_curves + + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = andrews_curves(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + @pytest.mark.slow + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_parallel_coordinates_colors(self, iris, color): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", color=color + ) + _check_colors(ax.get_lines()[:10], linecolors=color, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet + ) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_line_diff(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name") + nlines = len(ax.get_lines()) + nxticks = len(ax.xaxis.get_ticklabels()) + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", axvlines=False + ) + assert len(ax.get_lines()) == (nlines - nxticks) + + @pytest.mark.slow + def test_parallel_coordinates_handles(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = parallel_coordinates(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + # not sure if this is indicative of a problem + @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning") + def test_parallel_coordinates_with_sorted_labels(self): + """For #15908""" + from pandas.plotting import parallel_coordinates + + df = DataFrame( + { + "feat": list(range(30)), + "class": [2 for _ in range(10)] + + [3 for _ in range(10)] + + [1 for _ in range(10)], + } + ) + ax = parallel_coordinates(df, "class", sort_labels=True) + polylines, labels = ax.get_legend_handles_labels() + color_label_tuples = zip( + [polyline.get_color() for polyline in polylines], labels + ) + ordered_color_label_tuples = sorted(color_label_tuples, key=lambda x: x[1]) + prev_next_tupels = zip( + list(ordered_color_label_tuples[0:-1]), 
list(ordered_color_label_tuples[1:]) + ) + for prev, nxt in prev_next_tupels: + # labels and colors are ordered strictly increasing + assert prev[1] < nxt[1] and prev[0] < nxt[0] + + def test_radviz_no_warning(self, iris): + from pandas.plotting import radviz + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(radviz, frame=df, class_column="Name") + + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_radviz_color(self, iris, color): + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", color=color) + # skip Circle drawn as ticks + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches[:10], facecolors=color, mapping=df["Name"][:10]) + + def test_radviz_color_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", colormap=cm.jet) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10]) + + def test_radviz_colors_handles(self): + from pandas.plotting import radviz + + colors = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]] + df = DataFrame( + {"A": [1, 2, 3], "B": [2, 1, 3], "C": [3, 2, 1], "Name": ["b", "g", "r"]} + ) + ax = radviz(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=colors) + + def test_subplot_titles(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + + # Case len(title) == len(df) + plot = df.plot(subplots=True, title=title) + assert [p.get_title() for p in plot] == title + + def test_subplot_titles_too_much(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case len(title) > len(df) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title + ["kittens > puppies"]) + + def test_subplot_titles_too_little(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + # Case len(title) < len(df) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title[:2]) + + def test_subplot_titles_subplots_false(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case subplots=False and title is of type list + msg = ( + "Using `title` of type `list` is not supported unless " + "`subplots=True` is passed" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=False, title=title) + + def test_subplot_titles_numeric_square_layout(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case df with 3 numeric columns but layout of (2,2) + plot = df.drop("SepalWidth", axis=1).plot( + subplots=True, layout=(2, 2), title=title[:-1] + ) + title_list = 
[ax.get_title() for sublist in plot for ax in sublist] + assert title_list == title[:3] + [""] + + def test_get_standard_colors_random_seed(self): + # GH17525 + df = DataFrame(np.zeros((10, 10))) + + # Make sure that the random seed isn't reset by get_standard_colors + plotting.parallel_coordinates(df, 0) + rand1 = np.random.default_rng(None).random() + plotting.parallel_coordinates(df, 0) + rand2 = np.random.default_rng(None).random() + assert rand1 != rand2 + + def test_get_standard_colors_consistency(self): + # GH17525 + # Make sure it produces the same colors every time it's called + from pandas.plotting._matplotlib.style import get_standard_colors + + color1 = get_standard_colors(1, color_type="random") + color2 = get_standard_colors(1, color_type="random") + assert color1 == color2 + + def test_get_standard_colors_default_num_colors(self): + from pandas.plotting._matplotlib.style import get_standard_colors + + # Make sure the default color_types returns the specified amount + color1 = get_standard_colors(1, color_type="default") + color2 = get_standard_colors(9, color_type="default") + color3 = get_standard_colors(20, color_type="default") + assert len(color1) == 1 + assert len(color2) == 9 + assert len(color3) == 20 + + def test_plot_single_color(self): + # Example from #20585. All 3 bars should have the same color + df = DataFrame( + { + "account-start": ["2017-02-03", "2017-03-03", "2017-01-01"], + "client": ["Alice Anders", "Bob Baker", "Charlie Chaplin"], + "balance": [-1432.32, 10.43, 30000.00], + "db-id": [1234, 2424, 251], + "proxy-id": [525, 1525, 2542], + "rank": [52, 525, 32], + } + ) + ax = df.client.value_counts().plot.bar() + colors = [rect.get_facecolor() for rect in ax.get_children()[0:3]] + assert all(color == colors[0] for color in colors) + + def test_get_standard_colors_no_appending(self): + # GH20726 + + # Make sure not to add more colors so that matplotlib can cycle + # correctly. 
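+ # (appending extra colors to the user's palette would misalign the cycle)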
+ from matplotlib import cm + + from pandas.plotting._matplotlib.style import get_standard_colors + + color_before = cm.gnuplot(range(5)) + color_after = get_standard_colors(1, color=color_before) + assert len(color_after) == len(color_before) + + df = DataFrame( + np.random.default_rng(2).standard_normal((48, 4)), columns=list("ABCD") + ) + + color_list = cm.gnuplot(np.linspace(0, 1, 16)) + p = df.A.plot.bar(figsize=(16, 7), color=color_list) + assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor() + + @pytest.mark.parametrize("kind", ["bar", "line"]) + def test_dictionary_color(self, kind): + # issue-8193 + # Test plot color dictionary format + data_files = ["a", "b"] + + expected = [(0.5, 0.24, 0.6), (0.3, 0.7, 0.7)] + + df1 = DataFrame(np.random.default_rng(2).random((2, 2)), columns=data_files) + dic_color = {"b": (0.3, 0.7, 0.7), "a": (0.5, 0.24, 0.6)} + + ax = df1.plot(kind=kind, color=dic_color) + if kind == "bar": + colors = [rect.get_facecolor()[0:-1] for rect in ax.get_children()[0:3:2]] + else: + colors = [rect.get_color() for rect in ax.get_lines()[0:2]] + assert all(color == expected[index] for index, color in enumerate(colors)) + + def test_bar_plot(self): + # GH38947 + # Test bar plot with string and int index + from matplotlib.text import Text + + expected = [Text(0, 0, "0"), Text(1, 0, "Total")] + + df = DataFrame( + { + "a": [1, 2], + }, + index=Index([0, "Total"]), + ) + plot_bar = df.plot.bar() + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(plot_bar.get_xticklabels(), expected) + ) + + def test_barh_plot_labels_mixed_integer_string(self): + # GH39126 + # Test barh plot with string and integer at the same column + from matplotlib.text import Text + + df = DataFrame([{"word": 1, "value": 0}, {"word": "knowledge", "value": 2}]) + plot_barh = df.plot.barh(x="word", legend=None) + expected_yticklabels = [Text(0, 0, "1"), Text(0, 1, "knowledge")] + assert all( + actual.get_text() == expected.get_text() + for actual, expected in zip( + plot_barh.get_yticklabels(), expected_yticklabels + ) + ) + + def test_has_externally_shared_axis_x_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for x-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 4) + + # Create *externally* shared axes for first and third columns + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes for second and third columns + plots[0][1].twinx() + plots[0][2].twinx() + + # First column is only externally shared + # Second column is only internally shared + # Third column is both + # Fourth column is neither + assert func(plots[0][0], "x") + assert not func(plots[0][1], "x") + assert func(plots[0][2], "x") + assert not func(plots[0][3], "x") + + def test_has_externally_shared_axis_y_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for y-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create *externally* shared axes for first and third rows + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + plots[2][0] = fig.add_subplot(325, sharey=plots[2][1]) + + # Create *internally* shared axes for second and third rows + plots[1][0].twiny() + plots[2][0].twiny() + + # First row is only externally shared + # Second row is only internally shared + # Third row is both + # Fourth row is neither + 
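# expected pattern down the first column: True, False, True, False +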
assert func(plots[0][0], "y") + assert not func(plots[1][0], "y") + assert func(plots[2][0], "y") + assert not func(plots[3][0], "y") + + def test_has_externally_shared_axis_invalid_compare_axis(self): + # GH33819 + # Test _has_externally_shared_axis() raises an exception when + # passed an invalid value as compare_axis parameter + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create arbitrary axes + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + + # Check that an invalid compare_axis value triggers the expected exception + msg = "needs 'x' or 'y' as a second parameter" + with pytest.raises(ValueError, match=msg): + func(plots[0][0], "z") + + def test_externally_shared_axes(self): + # Example from GH33819 + # Create data + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(1000), + "b": np.random.default_rng(2).standard_normal(1000), + } + ) + + # Create figure + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 3) + + # Create *externally* shared axes + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + # note: no plots[0][1] that's the twin only case + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes + # note: no plots[0][0] that's the external only case + twin_ax1 = plots[0][1].twinx() + twin_ax2 = plots[0][2].twinx() + + # Plot data to primary axes + df["a"].plot(ax=plots[0][0], title="External share only").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][0]) + + df["a"].plot(ax=plots[0][1], title="Internal share (twin) only").set_xlabel( + "this label should always be visible" + ) + df["a"].plot(ax=plots[1][1]) + + df["a"].plot(ax=plots[0][2], title="Both").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][2]) + + # Plot data to twinned axes + df["b"].plot(ax=twin_ax1, color="green") + df["b"].plot(ax=twin_ax2, color="yellow") + + assert not plots[0][0].xaxis.get_label().get_visible() + assert plots[0][1].xaxis.get_label().get_visible() + assert not plots[0][2].xaxis.get_label().get_visible() + + def test_plot_bar_axis_units_timestamp_conversion(self): + # GH 38736 + # Ensure string x-axis from the second plot will not be converted to datetime + # due to axis data from first plot + df = DataFrame( + [1.0], + index=[Timestamp("2022-02-22 22:22:22")], + ) + _check_plot_works(df.plot) + s = Series({"A": 1.0}) + _check_plot_works(s.plot.bar) + + def test_bar_plt_xaxis_intervalrange(self): + # GH 38969 + # Ensure IntervalIndex x-axis produces a bar plot as expected + from matplotlib.text import Text + + expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")] + s = Series( + [1, 2], + index=[interval_range(0, 2, closed="both")], + ) + _check_plot_works(s.plot.bar) + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(s.plot.bar().get_xticklabels(), expected) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2f2f3b84307b9ed69e440d2ac0112abf153e67 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py @@ -0,0 +1,985 @@ +""" Test cases for Series.plot """ +from datetime import datetime +from itertools import chain + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy 
import np_version_gte1p24 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, + period_range, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _unpack_cycler, + get_y_axis, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +@pytest.fixture +def ts(): + return Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + +@pytest.fixture +def series(): + return Series( + range(20), dtype=np.float64, name="series", index=[f"i_{i}" for i in range(20)] + ) + + +class TestSeriesPlots: + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{"label": "foo"}, {"use_index": False}]) + def test_plot(self, ts, kwargs): + _check_plot_works(ts.plot, **kwargs) + + @pytest.mark.slow + def test_plot_tick_props(self, ts): + axes = _check_plot_works(ts.plot, rot=0) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "scale, exp_scale", + [ + [{"logy": True}, {"yaxis": "log"}], + [{"logx": True}, {"xaxis": "log"}], + [{"loglog": True}, {"xaxis": "log", "yaxis": "log"}], + ], + ) + def test_plot_scales(self, ts, scale, exp_scale): + ax = _check_plot_works(ts.plot, style=".", **scale) + _check_ax_scales(ax, **exp_scale) + + @pytest.mark.slow + def test_plot_ts_bar(self, ts): + _check_plot_works(ts[:10].plot.bar) + + @pytest.mark.slow + def test_plot_ts_area_stacked(self, ts): + _check_plot_works(ts.plot.area, stacked=False) + + def test_plot_iseries(self): + ser = Series(range(5), period_range("2020-01-01", periods=5)) + _check_plot_works(ser.plot) + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "hist", + "box", + ], + ) + def test_plot_series_kinds(self, series, kind): + _check_plot_works(series[:5].plot, kind=kind) + + def test_plot_series_barh(self, series): + _check_plot_works(series[:10].plot.barh) + + def test_plot_series_bar_ax(self): + ax = _check_plot_works( + Series(np.random.default_rng(2).standard_normal(10)).plot.bar, color="black" + ) + _check_colors([ax.patches[0]], facecolors=["black"]) + + @pytest.mark.parametrize("kwargs", [{}, {"layout": (-1, 1)}, {"layout": (1, -1)}]) + def test_plot_6951(self, ts, kwargs): + # GH 6951 + ax = _check_plot_works(ts.plot, subplots=True, **kwargs) + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + + def test_plot_figsize_and_title(self, series): + # figsize and title + _, ax = mpl.pyplot.subplots() + ax = series.plot(title="Test", figsize=(16, 8), ax=ax) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) + + def test_dont_modify_rcParams(self): + # GH 8242 + key = "axes.prop_cycle" + colors = mpl.pyplot.rcParams[key] + _, ax = mpl.pyplot.subplots() + Series([1, 2, 3]).plot(ax=ax) + assert colors == mpl.pyplot.rcParams[key] + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_ts_line_lim(self, ts, kwargs): + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax, **kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data(orig=False)[0][0] + assert xmax >= lines[0].get_data(orig=False)[0][-1] + + def test_ts_area_lim(self, ts): + 
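# x-limits of an unstacked area plot should span the plotted data +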
_, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_area_lim_xcompat(self, ts): + # GH 7471 + _, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=30) + + def test_ts_tz_area_lim_xcompat(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_tz_area_lim_xcompat_secondary_y(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_area_sharey_dont_overwrite(self, ts): + # GH37942 + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + abs(ts).plot(ax=ax1, kind="area") + abs(ts).plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + plt.close(fig) + + def test_label(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(label="LABEL", legend=True, ax=ax) + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_none(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=[""]) + mpl.pyplot.close("all") + + def test_label_ser_name(self): + s = Series([1, 2], name="NAME") + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["NAME"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override(self): + s = Series([1, 2], name="NAME") + # override the default + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, label="LABEL", ax=ax) + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override_dont_draw(self): + s = Series([1, 2], name="NAME") + # Add label info, but don't draw + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=False, label="LABEL", ax=ax) + assert ax.get_legend() is None # Hasn't been drawn + ax.legend() # draw it + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_boolean(self): + # GH 23719 + s = Series([False, False, True]) + _check_plot_works(s.plot, include_bool=True) + + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + _check_plot_works(s.plot) + + @pytest.mark.parametrize("index", [None, date_range("2020-01-01", periods=4)]) + def test_line_area_nan_series(self, index): + values = [1, 2, np.nan, 3] + d = Series(values, index=index) + ax = _check_plot_works(d.plot) + masked = ax.lines[0].get_ydata() + # remove nan for comparison purposes + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp) + tm.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False])) + + expected =
np.array([1, 2, 0, 3], dtype=np.float64) + ax = _check_plot_works(d.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + + def test_line_use_index_false(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax = s.plot(use_index=False, ax=ax) + label = ax.get_xlabel() + assert label == "" + + def test_line_use_index_false_diff_var(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax2 = s.plot.bar(use_index=False, ax=ax) + label2 = ax2.get_xlabel() + assert label2 == "" + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize("axis, meth", [("yaxis", "bar"), ("xaxis", "barh")]) + def test_bar_log(self, axis, meth): + expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]) + + _, ax = mpl.pyplot.subplots() + ax = getattr(Series([200, 500]).plot, meth)(log=True, ax=ax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize( + "axis, kind, res_meth", + [["yaxis", "bar", "get_ylim"], ["xaxis", "barh", "get_xlim"]], + ) + def test_bar_log_kind_bar(self, axis, kind, res_meth): + # GH 9905 + expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]) + + _, ax = mpl.pyplot.subplots() + ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind=kind, ax=ax) + ymin = 0.0007943282347242822 + ymax = 0.12589254117941673 + res = getattr(ax, res_meth)() + tm.assert_almost_equal(res[0], ymin) + tm.assert_almost_equal(res[1], ymax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + def test_bar_ignore_index(self): + df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"]) + _, ax = mpl.pyplot.subplots() + ax = df.plot.bar(use_index=False, ax=ax) + _check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"]) + + def test_bar_user_colors(self): + s = Series([1, 2, 3, 4]) + ax = s.plot.bar(color=["red", "blue", "blue", "red"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_rotation_default(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # Default rot 0 + _, ax = mpl.pyplot.subplots() + axes = df.plot(ax=ax) + _check_ticks_props(axes, xrot=0) + + def test_rotation_30(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + _, ax = mpl.pyplot.subplots() + axes = df.plot(rot=30, ax=ax) + _check_ticks_props(axes, xrot=30) + + def test_irregular_datetime(self): + from pandas.plotting._matplotlib.converter import DatetimeConverter + + rng = date_range("1/1/2000", "3/1/2000") + rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax) + ax.set_xlim("1/1/1999", "1/1/2001") + assert xp == ax.get_xlim()[0] + _check_ticks_props(ax, xrot=30) + + def test_unsorted_index_xlim(self): + ser = 
Series( + [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0], + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0]) + assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0]) + + def test_pie_series(self): + # if the sum of values is less than 1.0, pie handles them as rates and + # draws a semicircle. + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, series.index) + assert ax.get_ylabel() == "YLABEL" + + def test_pie_series_no_label(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie, labels=None) + _check_text_labels(ax.texts, [""] * 5) + + def test_pie_series_less_colors_than_elements(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b"] + ax = _check_plot_works(series.plot.pie, colors=color_args) + + color_expected = ["r", "g", "b", "r", "g"] + _check_colors(ax.patches, facecolors=color_expected) + + def test_pie_series_labels_and_colors(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + # with labels and colors + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_series_autopct_and_fontsize(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works( + series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7 + ) + pcts = [f"{s*100:.2f}" for s in series.values / series.sum()] + expected_texts = list(chain.from_iterable(zip(series.index, pcts))) + _check_text_labels(ax.texts, expected_texts) + for t in ax.texts: + assert t.get_fontsize() == 7 + + def test_pie_series_negative_raises(self): + # includes negative value + series = Series([1, 2, 0, 4, -1], index=["a", "b", "c", "d", "e"]) + with pytest.raises(ValueError, match="pie plot doesn't allow negative values"): + series.plot.pie() + + def test_pie_series_nan(self): + # includes nan + series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL") + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, ["a", "b", "", "d"]) + + def test_pie_nan(self): + s = Series([1, np.nan, 1, 1]) + _, ax = mpl.pyplot.subplots() + ax = s.plot.pie(legend=True, ax=ax) + expected = ["0", "", "2", "3"] + result = [x.get_text() for x in ax.texts] + assert result == expected + + def test_df_series_secondary_legend(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + + # primary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() +
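# right_ax is the secondary axes pandas attaches when secondary_y=True +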
assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_with_axes(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # primary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, labels=expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis_2(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, mark_right=False, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a", "b", "c", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + @pytest.mark.parametrize( + "input_logy, expected_scale", [(True, "log"), ("sym", "symlog")] + ) + def test_secondary_logy(self, input_logy, expected_scale): + # GH 25545 + s1 = Series(np.random.default_rng(2).standard_normal(100)) + s2 = Series(np.random.default_rng(2).standard_normal(100)) + + # GH 24980 + ax1 = s1.plot(logy=input_logy) + ax2 = s2.plot(secondary_y=True, logy=input_logy) + + assert ax1.get_yscale() == expected_scale + assert ax2.get_yscale() == expected_scale + + def test_plot_fails_with_dupe_color_and_style(self): + x = Series(np.random.default_rng(2).standard_normal(2)) + _, ax = mpl.pyplot.subplots() + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. 
Please use one or the other or pass 'style' without a color " + "symbol" + ) + with pytest.raises(ValueError, match=msg): + x.plot(style="k--", color="k", ax=ax) + + @pytest.mark.parametrize( + "bw_method, ind", + [ + ["scott", 20], + [None, 20], + [None, np.int_(20)], + [0.5, np.linspace(-100, 100, 20)], + ], + ) + def test_kde_kwargs(self, ts, bw_method, ind): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind) + + def test_density_kwargs(self, ts): + pytest.importorskip("scipy") + sample_points = np.linspace(-100, 100, 20) + _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points) + + def test_kde_kwargs_check_axes(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + sample_points = np.linspace(-100, 100, 20) + ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax) + _check_ax_scales(ax, yaxis="log") + _check_text_labels(ax.yaxis.get_label(), "Density") + + def test_kde_missing_vals(self): + pytest.importorskip("scipy") + s = Series(np.random.default_rng(2).uniform(size=50)) + s[0] = np.nan + axes = _check_plot_works(s.plot.kde) + + # gh-14821: check if the values have any missing values + assert any(~np.isnan(axes.lines[0].get_xdata())) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_boxplot_series(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.box(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + _check_text_labels(xlabels, [ts.name]) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + @pytest.mark.parametrize( + "kind", + plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds, + ) + def test_kind_kwarg(self, kind): + pytest.importorskip("scipy") + s = Series(range(3)) + _, ax = mpl.pyplot.subplots() + s.plot(kind=kind, ax=ax) + mpl.pyplot.close() + + @pytest.mark.parametrize( + "kind", + plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds, + ) + def test_kind_attr(self, kind): + pytest.importorskip("scipy") + s = Series(range(3)) + _, ax = mpl.pyplot.subplots() + getattr(s.plot, kind)() + mpl.pyplot.close() + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_invalid_plot_data(self, kind): + s = Series(list("abcd")) + _, ax = mpl.pyplot.subplots() + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + s.plot(kind=kind, ax=ax) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_valid_object_plot(self, kind): + pytest.importorskip("scipy") + s = Series(range(10), dtype=object) + _check_plot_works(s.plot, kind=kind) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_partially_invalid_plot_data(self, kind): + s = Series(["a", "b", 1.0, 2]) + _, ax = mpl.pyplot.subplots() + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + s.plot(kind=kind, ax=ax) + + def test_invalid_kind(self): + s = Series([1, 2]) + with pytest.raises(ValueError, match="invalid_kind is not a valid plot kind"): + s.plot(kind="invalid_kind") + + def test_dup_datetime_index_plot(self): + dr1 = date_range("1/1/2009", periods=4) + dr2 = date_range("1/2/2009", periods=4) + index = dr1.append(dr2) + values = np.random.default_rng(2).standard_normal(index.size) + s = Series(values, index=index) + _check_plot_works(s.plot) + + def test_errorbar_asymmetrical(self): + # GH9536 + s = Series(np.arange(10), name="x") + err = 
np.random.default_rng(2).random((2, 10)) + + ax = s.plot(yerr=err, xerr=err) + + result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()]) + expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1) + tm.assert_numpy_array_equal(result, expected) + + msg = ( + "Asymmetrical error bars should be provided " + f"with the shape \\(2, {len(s)}\\)" + ) + with pytest.raises(ValueError, match=msg): + s.plot(yerr=np.random.default_rng(2).random((2, 11))) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar"]) + @pytest.mark.parametrize( + "yerr", + [ + Series(np.abs(np.random.default_rng(2).standard_normal(10))), + np.abs(np.random.default_rng(2).standard_normal(10)), + list(np.abs(np.random.default_rng(2).standard_normal(10))), + DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), + columns=["x", "y"], + ), + ], + ) + def test_errorbar_plot(self, kind, yerr): + s = Series(np.arange(10), name="x") + ax = _check_plot_works(s.plot, yerr=yerr, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_yerr_0(self): + s = Series(np.arange(10), name="x") + s_err = np.abs(np.random.default_rng(2).standard_normal(10)) + ax = _check_plot_works(s.plot, xerr=s_err) + _check_has_errorbars(ax, xerr=1, yerr=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "yerr", + [ + Series(np.abs(np.random.default_rng(2).standard_normal(12))), + DataFrame( + np.abs(np.random.default_rng(2).standard_normal((12, 2))), + columns=["x", "y"], + ), + ], + ) + def test_errorbar_plot_ts(self, yerr): + # test time series plotting + ix = date_range("1/1/2000", "1/1/2001", freq="ME") + ts = Series(np.arange(12), index=ix, name="x") + yerr.index = ix + + ax = _check_plot_works(ts.plot, yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_invalid_yerr_shape(self): + s = Series(np.arange(10), name="x") + # check incorrect lengths and types + with tm.external_error_raised(ValueError): + s.plot(yerr=np.arange(11)) + + @pytest.mark.slow + def test_errorbar_plot_invalid_yerr(self): + s = Series(np.arange(10), name="x") + s_err = ["zzz"] * 10 + with tm.external_error_raised(TypeError): + s.plot(yerr=s_err) + + @pytest.mark.slow + def test_table_true(self, series): + _check_plot_works(series.plot, table=True) + + @pytest.mark.slow + def test_table_self(self, series): + _check_plot_works(series.plot, table=series) + + @pytest.mark.slow + def test_series_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + pytest.importorskip("scipy") + _check_grid_settings( + Series([1, 2, 3]), + plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds, + ) + + @pytest.mark.parametrize("c", ["r", "red", "green", "#FF0000"]) + def test_standard_colors(self, c): + from pandas.plotting._matplotlib.style import get_standard_colors + + result = get_standard_colors(1, color=c) + assert result == [c] + + result = get_standard_colors(1, color=[c]) + assert result == [c] + + result = get_standard_colors(3, color=c) + assert result == [c] * 3 + + result = get_standard_colors(3, color=[c]) + assert result == [c] * 3 + + def test_standard_colors_all(self): + from matplotlib import colors + + from pandas.plotting._matplotlib.style import get_standard_colors + + # multiple colors like mediumaquamarine + for c in colors.cnames: + result = get_standard_colors(num_colors=1, color=c) + assert result == [c] + + result = get_standard_colors(num_colors=1, 
color=[c]) + assert result == [c] + + result = get_standard_colors(num_colors=3, color=c) + assert result == [c] * 3 + + result = get_standard_colors(num_colors=3, color=[c]) + assert result == [c] * 3 + + # single letter colors like k + for c in colors.ColorConverter.colors: + result = get_standard_colors(num_colors=1, color=c) + assert result == [c] + + result = get_standard_colors(num_colors=1, color=[c]) + assert result == [c] + + result = get_standard_colors(num_colors=3, color=c) + assert result == [c] * 3 + + result = get_standard_colors(num_colors=3, color=[c]) + assert result == [c] * 3 + + def test_series_plot_color_kwargs(self): + # GH1890 + _, ax = mpl.pyplot.subplots() + ax = Series(np.arange(12) + 1).plot(color="green", ax=ax) + _check_colors(ax.get_lines(), linecolors=["green"]) + + def test_time_series_plot_color_kwargs(self): + # #1890 + _, ax = mpl.pyplot.subplots() + ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot( + color="green", ax=ax + ) + _check_colors(ax.get_lines(), linecolors=["green"]) + + def test_time_series_plot_color_with_empty_kwargs(self): + import matplotlib as mpl + + def_colors = _unpack_cycler(mpl.rcParams) + index = date_range("1/1/2000", periods=12) + s = Series(np.arange(1, 13), index=index) + + ncolors = 3 + + _, ax = mpl.pyplot.subplots() + for i in range(ncolors): + ax = s.plot(ax=ax) + _check_colors(ax.get_lines(), linecolors=def_colors[:ncolors]) + + def test_xticklabels(self): + # GH11529 + s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(xticks=[0, 3, 5, 9], ax=ax) + exp = [f"P{i:02d}" for i in [0, 3, 5, 9]] + _check_text_labels(ax.get_xticklabels(), exp) + + def test_xtick_barPlot(self): + # GH28172 + s = Series(range(10), index=[f"P{i:02d}" for i in range(10)]) + ax = s.plot.bar(xticks=range(0, 11, 2)) + exp = np.array(list(range(0, 11, 2))) + tm.assert_numpy_array_equal(exp, ax.get_xticks()) + + def test_custom_business_day_freq(self): + # GH7222 + from pandas.tseries.offsets import CustomBusinessDay + + s = Series( + range(100, 121), + index=pd.bdate_range( + start="2014-05-01", + end="2014-06-01", + freq=CustomBusinessDay(holidays=["2014-05-26"]), + ), + ) + + _check_plot_works(s.plot) + + @pytest.mark.xfail( + reason="GH#24426, see also " + "github.com/pandas-dev/pandas/commit/" + "ef1bd69fa42bbed5d09dd17f08c44fc8bfc2b685#r61470674" + ) + def test_plot_accessor_updates_on_inplace(self): + ser = Series([1, 2, 3, 4]) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + before = ax.xaxis.get_ticklocs() + + ser.drop([0, 1], inplace=True) + _, ax = mpl.pyplot.subplots() + after = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(before, after) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_plot_xlim_for_series(self, kind): + # test if xlim is also correctly plotted in Series for line and area + # GH 27686 + s = Series([2, 3]) + _, ax = mpl.pyplot.subplots() + s.plot(kind=kind, ax=ax) + xlims = ax.get_xlim() + + assert xlims[0] < 0 + assert xlims[1] > 1 + + def test_plot_no_rows(self): + # GH 27758 + df = Series(dtype=int) + assert df.empty + ax = df.plot() + assert len(ax.get_lines()) == 1 + line = ax.get_lines()[0] + assert len(line.get_xdata()) == 0 + assert len(line.get_ydata()) == 0 + + def test_plot_no_numeric_data(self): + df = Series(["a", "b", "c"]) + with pytest.raises(TypeError, match="no numeric data to plot"): + df.plot() + + @pytest.mark.parametrize( + "data, index", + [ + ([1, 2, 3, 4], [3, 2, 1, 0]), + ([10, 
50, 20, 30], [1910, 1920, 1980, 1950]), + ], + ) + def test_plot_order(self, data, index): + # GH38865 Verify plot order of a Series + ser = Series(data=data, index=index) + ax = ser.plot(kind="bar") + + expected = ser.tolist() + result = [ + patch.get_bbox().ymax + for patch in sorted(ax.patches, key=lambda patch: patch.get_bbox().xmax) + ] + assert expected == result + + def test_style_single_ok(self): + s = Series([1, 2]) + ax = s.plot(style="s", color="C3") + assert ax.lines[0].get_color() == "C3" + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [(None, "", "new"), ("old", "old", "new"), (None, "", "")], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar", "barh", "hist"]) + def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label): + # GH 9093 + ser = Series([1, 2, 3, 4]) + ser.index.name = index_name + + # default is the ylabel is not shown and xlabel is index name (reverse for barh) + ax = ser.plot(kind=kind) + if kind == "barh": + assert ax.get_xlabel() == "" + assert ax.get_ylabel() == old_label + elif kind == "hist": + assert ax.get_xlabel() == "" + assert ax.get_ylabel() == "Frequency" + else: + assert ax.get_ylabel() == "" + assert ax.get_xlabel() == old_label + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label) + assert ax.get_ylabel() == new_label + assert ax.get_xlabel() == new_label + + @pytest.mark.parametrize( + "index", + [ + pd.timedelta_range(start=0, periods=2, freq="D"), + [pd.Timedelta(days=1), pd.Timedelta(days=2)], + ], + ) + def test_timedelta_index(self, index): + # GH37454 + xlims = (3, 1) + ax = Series([1, 2], index=index).plot(xlim=(xlims)) + assert ax.get_xlim() == (3, 1) + + def test_series_none_color(self): + # GH51953 + series = Series([1, 2, 3]) + ax = series.plot(color=None) + expected = _unpack_cycler(mpl.pyplot.rcParams)[:1] + _check_colors(ax.get_lines(), linecolors=expected) + + @pytest.mark.slow + def test_plot_no_warning(self, ts): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + with tm.assert_produces_warning(False): + _ = ts.plot() diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py new file mode 100644 index 0000000000000000000000000000000000000000..665bda15724fd67dc9917509d2b95957b03107e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py @@ -0,0 +1,157 @@ +import pytest + +from pandas import Series + +pytest.importorskip("matplotlib") +from pandas.plotting._matplotlib.style import get_standard_colors + + +class TestGetStandardColors: + @pytest.mark.parametrize( + "num_colors, expected", + [ + (3, ["red", "green", "blue"]), + (5, ["red", "green", "blue", "red", "green"]), + (7, ["red", "green", "blue", "red", "green", "blue", "red"]), + (2, ["red", "green"]), + (1, ["red"]), + ], + ) + def test_default_colors_named_from_prop_cycle(self, num_colors, expected): + import matplotlib as mpl + from matplotlib.pyplot import cycler + + mpl_params = { + "axes.prop_cycle": cycler(color=["red", "green", "blue"]), + } + with mpl.rc_context(rc=mpl_params): + result = get_standard_colors(num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "num_colors, expected", + [ + (1, ["b"]), + (3, ["b", "g", "r"]), + (4, ["b", "g", "r", "y"]), + (5, ["b", "g", "r", "y", "b"]), + (7, ["b", "g", "r", "y", 
"b", "g", "r"]), + ], + ) + def test_default_colors_named_from_prop_cycle_string(self, num_colors, expected): + import matplotlib as mpl + from matplotlib.pyplot import cycler + + mpl_params = { + "axes.prop_cycle": cycler(color="bgry"), + } + with mpl.rc_context(rc=mpl_params): + result = get_standard_colors(num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "num_colors, expected_name", + [ + (1, ["C0"]), + (3, ["C0", "C1", "C2"]), + ( + 12, + [ + "C0", + "C1", + "C2", + "C3", + "C4", + "C5", + "C6", + "C7", + "C8", + "C9", + "C0", + "C1", + ], + ), + ], + ) + def test_default_colors_named_undefined_prop_cycle(self, num_colors, expected_name): + import matplotlib as mpl + import matplotlib.colors as mcolors + + with mpl.rc_context(rc={}): + expected = [mcolors.to_hex(x) for x in expected_name] + result = get_standard_colors(num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "num_colors, expected", + [ + (1, ["red", "green", (0.1, 0.2, 0.3)]), + (2, ["red", "green", (0.1, 0.2, 0.3)]), + (3, ["red", "green", (0.1, 0.2, 0.3)]), + (4, ["red", "green", (0.1, 0.2, 0.3), "red"]), + ], + ) + def test_user_input_color_sequence(self, num_colors, expected): + color = ["red", "green", (0.1, 0.2, 0.3)] + result = get_standard_colors(color=color, num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "num_colors, expected", + [ + (1, ["r", "g", "b", "k"]), + (2, ["r", "g", "b", "k"]), + (3, ["r", "g", "b", "k"]), + (4, ["r", "g", "b", "k"]), + (5, ["r", "g", "b", "k", "r"]), + (6, ["r", "g", "b", "k", "r", "g"]), + ], + ) + def test_user_input_color_string(self, num_colors, expected): + color = "rgbk" + result = get_standard_colors(color=color, num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "num_colors, expected", + [ + (1, [(0.1, 0.2, 0.3)]), + (2, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]), + (3, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]), + ], + ) + def test_user_input_color_floats(self, num_colors, expected): + color = (0.1, 0.2, 0.3) + result = get_standard_colors(color=color, num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize( + "color, num_colors, expected", + [ + ("Crimson", 1, ["Crimson"]), + ("DodgerBlue", 2, ["DodgerBlue", "DodgerBlue"]), + ("firebrick", 3, ["firebrick", "firebrick", "firebrick"]), + ], + ) + def test_user_input_named_color_string(self, color, num_colors, expected): + result = get_standard_colors(color=color, num_colors=num_colors) + assert result == expected + + @pytest.mark.parametrize("color", ["", [], (), Series([], dtype="object")]) + def test_empty_color_raises(self, color): + with pytest.raises(ValueError, match="Invalid color argument"): + get_standard_colors(color=color, num_colors=1) + + @pytest.mark.parametrize( + "color", + [ + "bad_color", + ("red", "green", "bad_color"), + (0.1,), + (0.1, 0.2), + (0.1, 0.2, 0.3, 0.4, 0.5), # must be either 3 or 4 floats + ], + ) + def test_bad_color_raises(self, color): + with pytest.raises(ValueError, match="Invalid color"): + get_standard_colors(color=color, num_colors=5) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3851753b67421842a0d3d9fd5f88e7eb72734dd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__init__.py @@ -0,0 +1,4 @@ +""" +Tests 
for reductions where we want to test for matching behavior across +Array, Index, Series, and DataFrame methods. +""" diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e37296d3326488c5ac3dcb3d82f1cf71fdf66a1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_reductions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d99cf667443735fe65a3177adeb4ff78ae8651 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_reductions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_stat_reductions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_stat_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..999872e79d11cd463b2d239d04ffb184c6f9be5e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/__pycache__/test_stat_reductions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_reductions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..30ec0d0affaa3b30facdb8bf55062017a217b5ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_reductions.py @@ -0,0 +1,1673 @@ +from datetime import ( + datetime, + timedelta, +) +from decimal import Decimal + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + NaT, + Period, + PeriodIndex, + RangeIndex, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + date_range, + isna, + period_range, + timedelta_range, + to_timedelta, +) +import pandas._testing as tm +from pandas.core import nanops +from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + + +def get_objs(): + indexes = [ + Index([True, False] * 5, name="a"), + Index(np.arange(10), dtype=np.int64, name="a"), + Index(np.arange(10), dtype=np.float64, name="a"), + DatetimeIndex(date_range("2020-01-01", periods=10), name="a"), + DatetimeIndex(date_range("2020-01-01", periods=10), name="a").tz_localize( + tz="US/Eastern" + ), + PeriodIndex(period_range("2020-01-01", periods=10, freq="D"), name="a"), + Index([str(i) for i in range(10)], name="a"), + ] + + arr = np.random.default_rng(2).standard_normal(10) + series = [Series(arr, index=idx, name="a") for idx in indexes] + + objs = indexes + series + return objs + + +class TestReductions: + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + @pytest.mark.parametrize("opname", ["max", "min"]) + @pytest.mark.parametrize("obj", get_objs()) + def test_ops(self, opname, obj): + result = getattr(obj, opname)() + if not isinstance(obj, PeriodIndex): + if isinstance(obj.values, ArrowStringArrayNumpySemantics): + # max not 
on the interface + expected = getattr(np.array(obj.values), opname)() + else: + expected = getattr(obj.values, opname)() + else: + expected = Period(ordinal=getattr(obj.asi8, opname)(), freq=obj.freq) + + if getattr(obj, "tz", None) is not None: + # We need to de-localize before comparing to the numpy-produced result + expected = expected.astype("M8[ns]").astype("int64") + assert result._value == expected + else: + assert result == expected + + @pytest.mark.parametrize("opname", ["max", "min"]) + @pytest.mark.parametrize( + "dtype, val", + [ + ("object", 2.0), + ("float64", 2.0), + ("datetime64[ns]", datetime(2011, 11, 1)), + ("Int64", 2), + ("boolean", True), + ], + ) + def test_nanminmax(self, opname, dtype, val, index_or_series): + # GH#7261 + klass = index_or_series + + def check_missing(res): + if dtype == "datetime64[ns]": + return res is NaT + elif dtype in ["Int64", "boolean"]: + return res is pd.NA + else: + return isna(res) + + obj = klass([None], dtype=dtype) + assert check_missing(getattr(obj, opname)()) + assert check_missing(getattr(obj, opname)(skipna=False)) + + obj = klass([], dtype=dtype) + assert check_missing(getattr(obj, opname)()) + assert check_missing(getattr(obj, opname)(skipna=False)) + + if dtype == "object": + # generic test with object only works for empty / all NaN + return + + obj = klass([None, val], dtype=dtype) + assert getattr(obj, opname)() == val + assert check_missing(getattr(obj, opname)(skipna=False)) + + obj = klass([None, val, None], dtype=dtype) + assert getattr(obj, opname)() == val + assert check_missing(getattr(obj, opname)(skipna=False)) + + @pytest.mark.parametrize("opname", ["max", "min"]) + def test_nanargminmax(self, opname, index_or_series): + # GH#7261 + klass = index_or_series + arg_op = "arg" + opname if klass is Index else "idx" + opname + + obj = klass([NaT, datetime(2011, 11, 1)]) + assert getattr(obj, arg_op)() == 1 + + msg = ( + "The behavior of (DatetimeIndex|Series).argmax/argmin with " + "skipna=False and NAs" + ) + if klass is Series: + msg = "The behavior of Series.(idxmax|idxmin) with all-NA" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = getattr(obj, arg_op)(skipna=False) + if klass is Series: + assert np.isnan(result) + else: + assert result == -1 + + obj = klass([NaT, datetime(2011, 11, 1), NaT]) + # check DatetimeIndex non-monotonic path + assert getattr(obj, arg_op)() == 1 + with tm.assert_produces_warning(FutureWarning, match=msg): + result = getattr(obj, arg_op)(skipna=False) + if klass is Series: + assert np.isnan(result) + else: + assert result == -1 + + @pytest.mark.parametrize("opname", ["max", "min"]) + @pytest.mark.parametrize("dtype", ["M8[ns]", "datetime64[ns, UTC]"]) + def test_nanops_empty_object(self, opname, index_or_series, dtype): + klass = index_or_series + arg_op = "arg" + opname if klass is Index else "idx" + opname + + obj = klass([], dtype=dtype) + + assert getattr(obj, opname)() is NaT + assert getattr(obj, opname)(skipna=False) is NaT + + with pytest.raises(ValueError, match="empty sequence"): + getattr(obj, arg_op)() + with pytest.raises(ValueError, match="empty sequence"): + getattr(obj, arg_op)(skipna=False) + + def test_argminmax(self): + obj = Index(np.arange(5, dtype="int64")) + assert obj.argmin() == 0 + assert obj.argmax() == 4 + + obj = Index([np.nan, 1, np.nan, 2]) + assert obj.argmin() == 1 + assert obj.argmax() == 3 + msg = "The behavior of Index.argmax/argmin with skipna=False and NAs" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert 
obj.argmin(skipna=False) == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax(skipna=False) == -1 + + obj = Index([np.nan]) + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmin() == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax() == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmin(skipna=False) == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax(skipna=False) == -1 + + msg = "The behavior of DatetimeIndex.argmax/argmin with skipna=False and NAs" + obj = Index([NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), NaT]) + assert obj.argmin() == 1 + assert obj.argmax() == 2 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmin(skipna=False) == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax(skipna=False) == -1 + + obj = Index([NaT]) + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmin() == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax() == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmin(skipna=False) == -1 + with tm.assert_produces_warning(FutureWarning, match=msg): + assert obj.argmax(skipna=False) == -1 + + @pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]]) + def test_same_tz_min_max_axis_1(self, op, expected_col): + # GH 10390 + df = DataFrame( + date_range("2016-01-01 00:00:00", periods=3, tz="UTC"), columns=["a"] + ) + df["b"] = df.a.subtract(Timedelta(seconds=3600)) + result = getattr(df, op)(axis=1) + expected = df[expected_col].rename(None) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func", ["maximum", "minimum"]) + def test_numpy_reduction_with_tz_aware_dtype(self, tz_aware_fixture, func): + # GH 15552 + tz = tz_aware_fixture + arg = pd.to_datetime(["2019"]).tz_localize(tz) + expected = Series(arg) + result = getattr(np, func)(expected, expected) + tm.assert_series_equal(result, expected) + + def test_nan_int_timedelta_sum(self): + # GH 27185 + df = DataFrame( + { + "A": Series([1, 2, NaT], dtype="timedelta64[ns]"), + "B": Series([1, 2, np.nan], dtype="Int64"), + } + ) + expected = Series({"A": Timedelta(3), "B": 3}) + result = df.sum() + tm.assert_series_equal(result, expected) + + +class TestIndexReductions: + # Note: the name TestIndexReductions indicates these tests + # were moved from a Index-specific test file, _not_ that these tests are + # intended long-term to be Index-specific + + @pytest.mark.parametrize( + "start,stop,step", + [ + (0, 400, 3), + (500, 0, -6), + (-(10**6), 10**6, 4), + (10**6, -(10**6), -4), + (0, 10, 20), + ], + ) + def test_max_min_range(self, start, stop, step): + # GH#17607 + idx = RangeIndex(start, stop, step) + expected = idx._values.max() + result = idx.max() + assert result == expected + + # skipna should be irrelevant since RangeIndex should never have NAs + result2 = idx.max(skipna=False) + assert result2 == expected + + expected = idx._values.min() + result = idx.min() + assert result == expected + + # skipna should be irrelevant since RangeIndex should never have NAs + result2 = idx.min(skipna=False) + assert result2 == expected + + # empty + idx = RangeIndex(start, stop, -step) + assert isna(idx.max()) + assert isna(idx.min()) + + def test_minmax_timedelta64(self): + # monotonic + idx1 = TimedeltaIndex(["1 days", "2 days", "3 days"]) + assert 
idx1.is_monotonic_increasing + + # non-monotonic + idx2 = TimedeltaIndex(["1 days", np.nan, "3 days", "NaT"]) + assert not idx2.is_monotonic_increasing + + for idx in [idx1, idx2]: + assert idx.min() == Timedelta("1 days") + assert idx.max() == Timedelta("3 days") + assert idx.argmin() == 0 + assert idx.argmax() == 2 + + @pytest.mark.parametrize("op", ["min", "max"]) + def test_minmax_timedelta_empty_or_na(self, op): + # Return NaT + obj = TimedeltaIndex([]) + assert getattr(obj, op)() is NaT + + obj = TimedeltaIndex([NaT]) + assert getattr(obj, op)() is NaT + + obj = TimedeltaIndex([NaT, NaT, NaT]) + assert getattr(obj, op)() is NaT + + def test_numpy_minmax_timedelta64(self): + td = timedelta_range("16815 days", "16820 days", freq="D") + + assert np.min(td) == Timedelta("16815 days") + assert np.max(td) == Timedelta("16820 days") + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(td, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(td, out=0) + + assert np.argmin(td) == 0 + assert np.argmax(td) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(td, out=0) + with pytest.raises(ValueError, match=errmsg): + np.argmax(td, out=0) + + def test_timedelta_ops(self): + # GH#4984 + # make sure ops return Timedelta + s = Series( + [Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)] + ) + td = s.diff() + + result = td.mean() + expected = to_timedelta(timedelta(seconds=9)) + assert result == expected + + result = td.to_frame().mean() + assert result[0] == expected + + result = td.quantile(0.1) + expected = Timedelta(np.timedelta64(2600, "ms")) + assert result == expected + + result = td.median() + expected = to_timedelta("00:00:09") + assert result == expected + + result = td.to_frame().median() + assert result[0] == expected + + # GH#6462 + # consistency in returned values for sum + result = td.sum() + expected = to_timedelta("00:01:21") + assert result == expected + + result = td.to_frame().sum() + assert result[0] == expected + + # std + result = td.std() + expected = to_timedelta(Series(td.dropna().values).std()) + assert result == expected + + result = td.to_frame().std() + assert result[0] == expected + + # GH#10040 + # make sure NaT is properly handled by median() + s = Series([Timestamp("2015-02-03"), Timestamp("2015-02-07")]) + assert s.diff().median() == timedelta(days=4) + + s = Series( + [Timestamp("2015-02-03"), Timestamp("2015-02-07"), Timestamp("2015-02-15")] + ) + assert s.diff().median() == timedelta(days=6) + + @pytest.mark.parametrize("opname", ["skew", "kurt", "sem", "prod", "var"]) + def test_invalid_td64_reductions(self, opname): + s = Series( + [Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)] + ) + td = s.diff() + + msg = "|".join( + [ + f"reduction operation '{opname}' not allowed for this dtype", + rf"cannot perform {opname} with type timedelta64\[ns\]", + f"does not support reduction '{opname}'", + ] + ) + + with pytest.raises(TypeError, match=msg): + getattr(td, opname)() + + with pytest.raises(TypeError, match=msg): + getattr(td.to_frame(), opname)(numeric_only=False) + + def test_minmax_tz(self, tz_naive_fixture): + tz = tz_naive_fixture + # monotonic + idx1 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz) + assert idx1.is_monotonic_increasing + + # non-monotonic + idx2 = DatetimeIndex( + ["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], tz=tz + ) + assert not 
idx2.is_monotonic_increasing + + for idx in [idx1, idx2]: + assert idx.min() == Timestamp("2011-01-01", tz=tz) + assert idx.max() == Timestamp("2011-01-03", tz=tz) + assert idx.argmin() == 0 + assert idx.argmax() == 2 + + @pytest.mark.parametrize("op", ["min", "max"]) + def test_minmax_nat_datetime64(self, op): + # Return NaT + obj = DatetimeIndex([]) + assert isna(getattr(obj, op)()) + + obj = DatetimeIndex([NaT]) + assert isna(getattr(obj, op)()) + + obj = DatetimeIndex([NaT, NaT, NaT]) + assert isna(getattr(obj, op)()) + + def test_numpy_minmax_integer(self): + # GH#26125 + idx = Index([1, 2, 3]) + + expected = idx.values.max() + result = np.max(idx) + assert result == expected + + expected = idx.values.min() + result = np.min(idx) + assert result == expected + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(idx, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(idx, out=0) + + expected = idx.values.argmax() + result = np.argmax(idx) + assert result == expected + + expected = idx.values.argmin() + result = np.argmin(idx) + assert result == expected + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(idx, out=0) + with pytest.raises(ValueError, match=errmsg): + np.argmax(idx, out=0) + + def test_numpy_minmax_range(self): + # GH#26125 + idx = RangeIndex(0, 10, 3) + + result = np.max(idx) + assert result == 9 + + result = np.min(idx) + assert result == 0 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(idx, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(idx, out=0) + + # No need to test again argmax/argmin compat since the implementation + # is the same as basic integer index + + def test_numpy_minmax_datetime64(self): + dr = date_range(start="2016-01-15", end="2016-01-20") + + assert np.min(dr) == Timestamp("2016-01-15 00:00:00") + assert np.max(dr) == Timestamp("2016-01-20 00:00:00") + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(dr, out=0) + + with pytest.raises(ValueError, match=errmsg): + np.max(dr, out=0) + + assert np.argmin(dr) == 0 + assert np.argmax(dr) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(dr, out=0) + + with pytest.raises(ValueError, match=errmsg): + np.argmax(dr, out=0) + + def test_minmax_period(self): + # monotonic + idx1 = PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D") + assert not idx1.is_monotonic_increasing + assert idx1[1:].is_monotonic_increasing + + # non-monotonic + idx2 = PeriodIndex( + ["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D" + ) + assert not idx2.is_monotonic_increasing + + for idx in [idx1, idx2]: + assert idx.min() == Period("2011-01-01", freq="D") + assert idx.max() == Period("2011-01-03", freq="D") + assert idx1.argmin() == 1 + assert idx2.argmin() == 0 + assert idx1.argmax() == 3 + assert idx2.argmax() == 2 + + @pytest.mark.parametrize("op", ["min", "max"]) + @pytest.mark.parametrize("data", [[], [NaT], [NaT, NaT, NaT]]) + def test_minmax_period_empty_nat(self, op, data): + # Return NaT + obj = PeriodIndex(data, freq="M") + result = getattr(obj, op)() + assert result is NaT + + def test_numpy_minmax_period(self): + pr = period_range(start="2016-01-15", end="2016-01-20") + + assert np.min(pr) == Period("2016-01-15", freq="D") + assert np.max(pr) == Period("2016-01-20", 
freq="D") + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(pr, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(pr, out=0) + + assert np.argmin(pr) == 0 + assert np.argmax(pr) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(pr, out=0) + with pytest.raises(ValueError, match=errmsg): + np.argmax(pr, out=0) + + def test_min_max_categorical(self): + ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + msg = ( + r"Categorical is not ordered for operation min\n" + r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n" + ) + with pytest.raises(TypeError, match=msg): + ci.min() + msg = ( + r"Categorical is not ordered for operation max\n" + r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n" + ) + with pytest.raises(TypeError, match=msg): + ci.max() + + ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=True) + assert ci.min() == "c" + assert ci.max() == "b" + + +class TestSeriesReductions: + # Note: the name TestSeriesReductions indicates these tests + # were moved from a series-specific test file, _not_ that these tests are + # intended long-term to be series-specific + + def test_sum_inf(self): + s = Series(np.random.default_rng(2).standard_normal(10)) + s2 = s.copy() + + s[5:8] = np.inf + s2[5:8] = np.nan + + assert np.isinf(s.sum()) + + arr = np.random.default_rng(2).standard_normal((100, 100)).astype("f4") + arr[:, 2] = np.inf + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + tm.assert_almost_equal(s.sum(), s2.sum()) + + res = nanops.nansum(arr, axis=1) + assert np.isinf(res).all() + + @pytest.mark.parametrize( + "dtype", ["float64", "Float32", "Int64", "boolean", "object"] + ) + @pytest.mark.parametrize("use_bottleneck", [True, False]) + @pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)]) + def test_empty(self, method, unit, use_bottleneck, dtype): + with pd.option_context("use_bottleneck", use_bottleneck): + # GH#9422 / GH#18921 + # Entirely empty + s = Series([], dtype=dtype) + # NA by default + result = getattr(s, method)() + assert result == unit + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) + assert isna(result) + + # Skipna, default + result = getattr(s, method)(skipna=True) + result == unit + + # Skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) + assert isna(result) + + result = getattr(s, method)(skipna=False, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=False, min_count=1) + assert isna(result) + + # All-NA + s = Series([np.nan], dtype=dtype) + # NA by default + result = getattr(s, method)() + assert result == unit + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) + assert isna(result) + + # Skipna, default + result = getattr(s, method)(skipna=True) + result == unit + + # skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) + assert isna(result) + + # Mix of valid, empty + s = Series([np.nan, 1], dtype=dtype) + # Default + 
result = getattr(s, method)() + assert result == 1.0 + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == 1.0 + + result = getattr(s, method)(min_count=1) + assert result == 1.0 + + # Skipna + result = getattr(s, method)(skipna=True) + assert result == 1.0 + + result = getattr(s, method)(skipna=True, min_count=0) + assert result == 1.0 + + # GH#844 (changed in GH#9422) + df = DataFrame(np.empty((10, 0)), dtype=dtype) + assert (getattr(df, method)(1) == unit).all() + + s = Series([1], dtype=dtype) + result = getattr(s, method)(min_count=2) + assert isna(result) + + result = getattr(s, method)(skipna=False, min_count=2) + assert isna(result) + + s = Series([np.nan], dtype=dtype) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = Series([np.nan, 1], dtype=dtype) + result = getattr(s, method)(min_count=2) + assert isna(result) + + @pytest.mark.parametrize("method", ["mean", "var"]) + @pytest.mark.parametrize("dtype", ["Float64", "Int64", "boolean"]) + def test_ops_consistency_on_empty_nullable(self, method, dtype): + # GH#34814 + # consistency for nullable dtypes on empty or ALL-NA mean + + # empty series + eser = Series([], dtype=dtype) + result = getattr(eser, method)() + assert result is pd.NA + + # ALL-NA series + nser = Series([np.nan], dtype=dtype) + result = getattr(nser, method)() + assert result is pd.NA + + @pytest.mark.parametrize("method", ["mean", "median", "std", "var"]) + def test_ops_consistency_on_empty(self, method): + # GH#7869 + # consistency on empty + + # float + result = getattr(Series(dtype=float), method)() + assert isna(result) + + # timedelta64[ns] + tdser = Series([], dtype="m8[ns]") + if method == "var": + msg = "|".join( + [ + "operation 'var' not allowed", + r"cannot perform var with type timedelta64\[ns\]", + "does not support reduction 'var'", + ] + ) + with pytest.raises(TypeError, match=msg): + getattr(tdser, method)() + else: + result = getattr(tdser, method)() + assert result is NaT + + def test_nansum_buglet(self): + ser = Series([1.0, np.nan], index=[0, 1]) + result = np.nansum(ser) + tm.assert_almost_equal(result, 1) + + @pytest.mark.parametrize("use_bottleneck", [True, False]) + @pytest.mark.parametrize("dtype", ["int32", "int64"]) + def test_sum_overflow_int(self, use_bottleneck, dtype): + with pd.option_context("use_bottleneck", use_bottleneck): + # GH#6915 + # overflowing on the smaller int dtypes + v = np.arange(5000000, dtype=dtype) + s = Series(v) + + result = s.sum(skipna=False) + assert int(result) == v.sum(dtype="int64") + result = s.min(skipna=False) + assert int(result) == 0 + result = s.max(skipna=False) + assert int(result) == v[-1] + + @pytest.mark.parametrize("use_bottleneck", [True, False]) + @pytest.mark.parametrize("dtype", ["float32", "float64"]) + def test_sum_overflow_float(self, use_bottleneck, dtype): + with pd.option_context("use_bottleneck", use_bottleneck): + v = np.arange(5000000, dtype=dtype) + s = Series(v) + + result = s.sum(skipna=False) + assert result == v.sum(dtype=dtype) + result = s.min(skipna=False) + assert np.allclose(float(result), 0.0) + result = s.max(skipna=False) + assert np.allclose(float(result), v[-1]) + + def test_mean_masked_overflow(self): + # GH#48378 + val = 100_000_000_000_000_000 + n_elements = 100 + na = np.array([val] * n_elements) + ser = Series([val] * n_elements, dtype="Int64") + + result_numpy = np.mean(na) + result_masked = ser.mean() + assert result_masked - result_numpy == 0 + assert result_masked == 1e17 + + @pytest.mark.parametrize("ddof, exp", 
[(1, 2.5), (0, 2.0)]) + def test_var_masked_array(self, ddof, exp): + # GH#48379 + ser = Series([1, 2, 3, 4, 5], dtype="Int64") + ser_numpy_dtype = Series([1, 2, 3, 4, 5], dtype="int64") + result = ser.var(ddof=ddof) + result_numpy_dtype = ser_numpy_dtype.var(ddof=ddof) + assert result == result_numpy_dtype + assert result == exp + + @pytest.mark.parametrize("dtype", ("m8[ns]", "m8[ns]", "M8[ns]", "M8[ns, UTC]")) + @pytest.mark.parametrize("skipna", [True, False]) + def test_empty_timeseries_reductions_return_nat(self, dtype, skipna): + # covers GH#11245 + assert Series([], dtype=dtype).min(skipna=skipna) is NaT + assert Series([], dtype=dtype).max(skipna=skipna) is NaT + + def test_numpy_argmin(self): + # See GH#16830 + data = np.arange(1, 11) + + s = Series(data, index=data) + result = np.argmin(s) + + expected = np.argmin(data) + assert result == expected + + result = s.argmin() + + assert result == expected + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argmin(s, out=data) + + def test_numpy_argmax(self): + # See GH#16830 + data = np.arange(1, 11) + + ser = Series(data, index=data) + result = np.argmax(ser) + expected = np.argmax(data) + assert result == expected + + result = ser.argmax() + + assert result == expected + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argmax(ser, out=data) + + def test_idxmin_dt64index(self, unit): + # GH#43587 should have NaT instead of NaN + dti = DatetimeIndex(["NaT", "2015-02-08", "NaT"]).as_unit(unit) + ser = Series([1.0, 2.0, np.nan], index=dti) + msg = "The behavior of Series.idxmin with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.idxmin(skipna=False) + assert res is NaT + msg = "The behavior of Series.idxmax with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = ser.idxmax(skipna=False) + assert res is NaT + + df = ser.to_frame() + msg = "The behavior of DataFrame.idxmin with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.idxmin(skipna=False) + assert res.dtype == f"M8[{unit}]" + assert res.isna().all() + msg = "The behavior of DataFrame.idxmax with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.idxmax(skipna=False) + assert res.dtype == f"M8[{unit}]" + assert res.isna().all() + + def test_idxmin(self): + # test idxmin + # _check_stat_op approach can not be used here because of isna check. + string_series = Series(range(20), dtype=np.float64, name="series") + + # add some NaNs + string_series[5:15] = np.nan + + # skipna or no + assert string_series[string_series.idxmin()] == string_series.min() + msg = "The behavior of Series.idxmin" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isna(string_series.idxmin(skipna=False)) + + # no NaNs + nona = string_series.dropna() + assert nona[nona.idxmin()] == nona.min() + assert nona.index.values.tolist().index(nona.idxmin()) == nona.values.argmin() + + # all NaNs + allna = string_series * np.nan + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isna(allna.idxmin()) + + # datetime64[ns] + s = Series(date_range("20130102", periods=6)) + result = s.idxmin() + assert result == 0 + + s[0] = np.nan + result = s.idxmin() + assert result == 1 + + def test_idxmax(self): + # test idxmax + # _check_stat_op approach can not be used here because of isna check. 
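+ # idxmax/idxmin return the index *label* of the first max/min, not the + # position; with NaNs present and skipna=False, the current (deprecated) + # behavior warns and returns NaN, as exercised below.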
+ string_series = Series(range(20), dtype=np.float64, name="series") + + # add some NaNs + string_series[5:15] = np.nan + + # skipna or no + assert string_series[string_series.idxmax()] == string_series.max() + msg = "The behavior of Series.idxmax with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isna(string_series.idxmax(skipna=False)) + + # no NaNs + nona = string_series.dropna() + assert nona[nona.idxmax()] == nona.max() + assert nona.index.values.tolist().index(nona.idxmax()) == nona.values.argmax() + + # all NaNs + allna = string_series * np.nan + msg = "The behavior of Series.idxmax with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isna(allna.idxmax()) + + s = Series(date_range("20130102", periods=6)) + result = s.idxmax() + assert result == 5 + + s[5] = np.nan + result = s.idxmax() + assert result == 4 + + # Index with float64 dtype + # GH#5914 + s = Series([1, 2, 3], [1.1, 2.1, 3.1]) + result = s.idxmax() + assert result == 3.1 + result = s.idxmin() + assert result == 1.1 + + s = Series(s.index, s.index) + result = s.idxmax() + assert result == 3.1 + result = s.idxmin() + assert result == 1.1 + + def test_all_any(self): + ts = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + bool_series = ts > 0 + assert not bool_series.all() + assert bool_series.any() + + # Alternative types, with implicit 'object' dtype. + s = Series(["abc", True]) + assert s.any() + + def test_numpy_all_any(self, index_or_series): + # GH#40180 + idx = index_or_series([0, 1, 2]) + assert not np.all(idx) + assert np.any(idx) + idx = Index([1, 2, 3]) + assert np.all(idx) + + def test_all_any_skipna(self): + # Check skipna, with implicit 'object' dtype. 
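+ # Under object dtype, NaN is truthy: with skipna=False it can satisfy + # any() and does not break all(); with skipna=True it is dropped before + # reducing.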
+ s1 = Series([np.nan, True]) + s2 = Series([np.nan, False]) + assert s1.all(skipna=False) # nan && True => True + assert s1.all(skipna=True) + assert s2.any(skipna=False) + assert not s2.any(skipna=True) + + def test_all_any_bool_only(self): + s = Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2]) + + # GH#47500 - test bool_only works + assert s.any(bool_only=True) + assert not s.all(bool_only=True) + + @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_any_all_object_dtype(self, bool_agg_func, skipna): + # GH#12863 + ser = Series(["a", "b", "c", "d", "e"], dtype=object) + result = getattr(ser, bool_agg_func)(skipna=skipna) + expected = True + + assert result == expected + + @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) + @pytest.mark.parametrize( + "data", [[False, None], [None, False], [False, np.nan], [np.nan, False]] + ) + def test_any_all_object_dtype_missing(self, data, bool_agg_func): + # GH#27709 + ser = Series(data) + result = getattr(ser, bool_agg_func)(skipna=False) + + # None is treated as False, but np.nan is treated as True + expected = bool_agg_func == "any" and None not in data + assert result == expected + + @pytest.mark.parametrize("dtype", ["boolean", "Int64", "UInt64", "Float64"]) + @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize( + # expected_data indexed as [[skipna=False/any, skipna=False/all], + # [skipna=True/any, skipna=True/all]] + "data,expected_data", + [ + ([0, 0, 0], [[False, False], [False, False]]), + ([1, 1, 1], [[True, True], [True, True]]), + ([pd.NA, pd.NA, pd.NA], [[pd.NA, pd.NA], [False, True]]), + ([0, pd.NA, 0], [[pd.NA, False], [False, False]]), + ([1, pd.NA, 1], [[True, pd.NA], [True, True]]), + ([1, pd.NA, 0], [[True, False], [True, False]]), + ], + ) + def test_any_all_nullable_kleene_logic( + self, bool_agg_func, skipna, data, dtype, expected_data + ): + # GH-37506, GH-41967 + ser = Series(data, dtype=dtype) + expected = expected_data[skipna][bool_agg_func == "all"] + + result = getattr(ser, bool_agg_func)(skipna=skipna) + assert (result is pd.NA and expected is pd.NA) or result == expected + + def test_any_axis1_bool_only(self): + # GH#32432 + df = DataFrame({"A": [True, False], "B": [1, 2]}) + result = df.any(axis=1, bool_only=True) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + def test_any_all_datetimelike(self): + # GH#38723 these may not be the desired long-term behavior (GH#34479) + # but in the interim should be internally consistent + dta = date_range("1995-01-02", periods=3)._data + ser = Series(dta) + df = DataFrame(ser) + + msg = "'(any|all)' with datetime64 dtypes is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#34479 + assert dta.all() + assert dta.any() + + assert ser.all() + assert ser.any() + + assert df.any().all() + assert df.all().all() + + dta = dta.tz_localize("UTC") + ser = Series(dta) + df = DataFrame(ser) + + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#34479 + assert dta.all() + assert dta.any() + + assert ser.all() + assert ser.any() + + assert df.any().all() + assert df.all().all() + + tda = dta - dta[0] + ser = Series(tda) + df = DataFrame(ser) + + assert tda.any() + assert not tda.all() + + assert ser.any() + assert not ser.all() + + assert df.any().all() + assert not df.all().any() + + def test_any_all_pyarrow_string(self): + # 
GH#54591 + pytest.importorskip("pyarrow") + ser = Series(["", "a"], dtype="string[pyarrow_numpy]") + assert ser.any() + assert not ser.all() + + ser = Series([None, "a"], dtype="string[pyarrow_numpy]") + assert ser.any() + assert ser.all() + assert not ser.all(skipna=False) + + ser = Series([None, ""], dtype="string[pyarrow_numpy]") + assert not ser.any() + assert not ser.all() + + ser = Series(["a", "b"], dtype="string[pyarrow_numpy]") + assert ser.any() + assert ser.all() + + def test_timedelta64_analytics(self): + # index min/max + dti = date_range("2012-1-1", periods=3, freq="D") + td = Series(dti) - Timestamp("20120101") + + result = td.idxmin() + assert result == 0 + + result = td.idxmax() + assert result == 2 + + # GH#2982 + # with NaT + td[0] = np.nan + + result = td.idxmin() + assert result == 1 + + result = td.idxmax() + assert result == 2 + + # abs + s1 = Series(date_range("20120101", periods=3)) + s2 = Series(date_range("20120102", periods=3)) + expected = Series(s2 - s1) + + result = np.abs(s1 - s2) + tm.assert_series_equal(result, expected) + + result = (s1 - s2).abs() + tm.assert_series_equal(result, expected) + + # max/min + result = td.max() + expected = Timedelta("2 days") + assert result == expected + + result = td.min() + expected = Timedelta("1 days") + assert result == expected + + def test_assert_idxminmax_empty_raises(self): + """ + Cases where ``Series.idxmin`` and ``Series.idxmax`` should raise + on an empty Series; non-empty object-dtype cases are covered by + test_idxminmax_object_dtype below. + """ + test_input = Series([], dtype="float64") + msg = "attempt to get argmin of an empty sequence" + with pytest.raises(ValueError, match=msg): + test_input.idxmin() + with pytest.raises(ValueError, match=msg): + test_input.idxmin(skipna=False) + msg = "attempt to get argmax of an empty sequence" + with pytest.raises(ValueError, match=msg): + test_input.idxmax() + with pytest.raises(ValueError, match=msg): + test_input.idxmax(skipna=False) + + def test_idxminmax_object_dtype(self, using_infer_string): + # pre-2.1 object-dtype was disallowed for argmin/max + ser = Series(["foo", "bar", "baz"]) + assert ser.idxmax() == 0 + assert ser.idxmax(skipna=False) == 0 + assert ser.idxmin() == 1 + assert ser.idxmin(skipna=False) == 1 + + ser2 = Series([(1,), (2,)]) + assert ser2.idxmax() == 1 + assert ser2.idxmax(skipna=False) == 1 + assert ser2.idxmin() == 0 + assert ser2.idxmin(skipna=False) == 0 + + if not using_infer_string: + # attempting to compare np.nan with string raises + ser3 = Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]) + msg = "'>' not supported between instances of 'float' and 'str'" + with pytest.raises(TypeError, match=msg): + ser3.idxmax() + with pytest.raises(TypeError, match=msg): + ser3.idxmax(skipna=False) + msg = "'<' not supported between instances of 'float' and 'str'" + with pytest.raises(TypeError, match=msg): + ser3.idxmin() + with pytest.raises(TypeError, match=msg): + ser3.idxmin(skipna=False) + + def test_idxminmax_object_frame(self): + # GH#4279 + df = DataFrame([["zimm", 2.5], ["biff", 1.0], ["bid", 12.0]]) + res = df.idxmax() + exp = Series([0, 2]) + tm.assert_series_equal(res, exp) + + def test_idxminmax_object_tuples(self): + # GH#43697 + ser = Series([(1, 3), (2, 2), (3, 
1)]) + assert ser.idxmax() == 2 + assert ser.idxmin() == 0 + assert ser.idxmax(skipna=False) == 2 + assert ser.idxmin(skipna=False) == 0 + + def test_idxminmax_object_decimals(self): + # GH#40685 + df = DataFrame( + { + "idx": [0, 1], + "x": [Decimal("8.68"), Decimal("42.23")], + "y": [Decimal("7.11"), Decimal("79.61")], + } + ) + res = df.idxmax() + exp = Series({"idx": 1, "x": 1, "y": 1}) + tm.assert_series_equal(res, exp) + + res2 = df.idxmin() + exp2 = exp - 1 + tm.assert_series_equal(res2, exp2) + + def test_argminmax_object_ints(self): + # GH#18021 + ser = Series([0, 1], dtype="object") + assert ser.argmax() == 1 + assert ser.argmin() == 0 + assert ser.argmax(skipna=False) == 1 + assert ser.argmin(skipna=False) == 0 + + def test_idxminmax_with_inf(self): + # For numeric data with NA and Inf (GH #13595) + s = Series([0, -np.inf, np.inf, np.nan]) + + assert s.idxmin() == 1 + msg = "The behavior of Series.idxmin with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert np.isnan(s.idxmin(skipna=False)) + + assert s.idxmax() == 2 + msg = "The behavior of Series.idxmax with all-NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert np.isnan(s.idxmax(skipna=False)) + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + # Using old-style behavior that treats floating point nan, -inf, and + # +inf as missing + with pd.option_context("mode.use_inf_as_na", True): + assert s.idxmin() == 0 + assert np.isnan(s.idxmin(skipna=False)) + assert s.idxmax() == 0 + assert np.isnan(s.idxmax(skipna=False)) + + def test_sum_uint64(self): + # GH 53401 + s = Series([10000000000000000000], dtype="uint64") + result = s.sum() + expected = np.uint64(10000000000000000000) + tm.assert_almost_equal(result, expected) + + +class TestDatetime64SeriesReductions: + # Note: the name TestDatetime64SeriesReductions indicates these tests + # were moved from a series-specific test file, _not_ that these tests are + # intended long-term to be series-specific + + @pytest.mark.parametrize( + "nat_ser", + [ + Series([NaT, NaT]), + Series([NaT, Timedelta("nat")]), + Series([Timedelta("nat"), Timedelta("nat")]), + ], + ) + def test_minmax_nat_series(self, nat_ser): + # GH#23282 + assert nat_ser.min() is NaT + assert nat_ser.max() is NaT + assert nat_ser.min(skipna=False) is NaT + assert nat_ser.max(skipna=False) is NaT + + @pytest.mark.parametrize( + "nat_df", + [ + DataFrame([NaT, NaT]), + DataFrame([NaT, Timedelta("nat")]), + DataFrame([Timedelta("nat"), Timedelta("nat")]), + ], + ) + def test_minmax_nat_dataframe(self, nat_df): + # GH#23282 + assert nat_df.min()[0] is NaT + assert nat_df.max()[0] is NaT + assert nat_df.min(skipna=False)[0] is NaT + assert nat_df.max(skipna=False)[0] is NaT + + def test_min_max(self): + rng = date_range("1/1/2000", "12/31/2000") + rng2 = rng.take(np.random.default_rng(2).permutation(len(rng))) + + the_min = rng2.min() + the_max = rng2.max() + assert isinstance(the_min, Timestamp) + assert isinstance(the_max, Timestamp) + assert the_min == rng[0] + assert the_max == rng[-1] + + assert rng.min() == rng[0] + assert rng.max() == rng[-1] + + def test_min_max_series(self): + rng = date_range("1/1/2000", periods=10, freq="4h") + lvls = ["A", "A", "A", "B", "B", "B", "C", "C", "C", "C"] + df = DataFrame( + { + "TS": rng, + "V": np.random.default_rng(2).standard_normal(len(rng)), + "L": lvls, + } + ) + + result = df.TS.max() + exp = Timestamp(df.TS.iat[-1]) + assert isinstance(result, Timestamp) + 
assert result == exp + + result = df.TS.min() + exp = Timestamp(df.TS.iat[0]) + assert isinstance(result, Timestamp) + assert result == exp + + +class TestCategoricalSeriesReductions: + # Note: the name TestCategoricalSeriesReductions indicates these tests + # were moved from a series-specific test file, _not_ that these tests are + # intended long-term to be series-specific + + @pytest.mark.parametrize("function", ["min", "max"]) + def test_min_max_unordered_raises(self, function): + # unordered cats have no min/max + cat = Series(Categorical(["a", "b", "c", "d"], ordered=False)) + msg = f"Categorical is not ordered for operation {function}" + with pytest.raises(TypeError, match=msg): + getattr(cat, function)() + + @pytest.mark.parametrize( + "values, categories", + [ + (list("abc"), list("abc")), + (list("abc"), list("cba")), + (list("abc") + [np.nan], list("cba")), + ([1, 2, 3], [3, 2, 1]), + ([1, 2, 3, np.nan], [3, 2, 1]), + ], + ) + @pytest.mark.parametrize("function", ["min", "max"]) + def test_min_max_ordered(self, values, categories, function): + # GH 25303 + cat = Series(Categorical(values, categories=categories, ordered=True)) + result = getattr(cat, function)(skipna=True) + expected = categories[0] if function == "min" else categories[2] + assert result == expected + + @pytest.mark.parametrize("function", ["min", "max"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_ordered_with_nan_only(self, function, skipna): + # https://github.com/pandas-dev/pandas/issues/33450 + cat = Series(Categorical([np.nan], categories=[1, 2], ordered=True)) + result = getattr(cat, function)(skipna=skipna) + assert result is np.nan + + @pytest.mark.parametrize("function", ["min", "max"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_skipna(self, function, skipna): + cat = Series( + Categorical(["a", "b", np.nan, "a"], categories=["b", "a"], ordered=True) + ) + result = getattr(cat, function)(skipna=skipna) + + if skipna is True: + expected = "b" if function == "min" else "a" + assert result == expected + else: + assert result is np.nan + + +class TestSeriesMode: + # Note: the name TestSeriesMode indicates these tests + # were moved from a series-specific test file, _not_ that these tests are + # intended long-term to be series-specific + + @pytest.mark.parametrize( + "dropna, expected", + [(True, Series([], dtype=np.float64)), (False, Series([], dtype=np.float64))], + ) + def test_mode_empty(self, dropna, expected): + s = Series([], dtype=np.float64) + result = s.mode(dropna) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dropna, data, expected", + [ + (True, [1, 1, 1, 2], [1]), + (True, [1, 1, 1, 2, 3, 3, 3], [1, 3]), + (False, [1, 1, 1, 2], [1]), + (False, [1, 1, 1, 2, 3, 3, 3], [1, 3]), + ], + ) + @pytest.mark.parametrize( + "dt", list(np.typecodes["AllInteger"] + np.typecodes["Float"]) + ) + def test_mode_numerical(self, dropna, data, expected, dt): + s = Series(data, dtype=dt) + result = s.mode(dropna) + expected = Series(expected, dtype=dt) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dropna, expected", [(True, [1.0]), (False, [1, np.nan])]) + def test_mode_numerical_nan(self, dropna, expected): + s = Series([1, 1, 2, np.nan, np.nan]) + result = s.mode(dropna) + expected = Series(expected) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dropna, expected1, expected2, expected3", + [(True, ["b"], ["bar"], ["nan"]), (False, ["b"], [np.nan], ["nan"])], + ) + def 
test_mode_str_obj(self, dropna, expected1, expected2, expected3): + # Test string and object types. + data = ["a"] * 2 + ["b"] * 3 + + s = Series(data, dtype="c") + result = s.mode(dropna) + expected1 = Series(expected1, dtype="c") + tm.assert_series_equal(result, expected1) + + data = ["foo", "bar", "bar", np.nan, np.nan, np.nan] + + s = Series(data, dtype=object) + result = s.mode(dropna) + expected2 = Series(expected2, dtype=None if expected2 == ["bar"] else object) + tm.assert_series_equal(result, expected2) + + data = ["foo", "bar", "bar", np.nan, np.nan, np.nan] + + s = Series(data, dtype=object).astype(str) + result = s.mode(dropna) + expected3 = Series(expected3) + tm.assert_series_equal(result, expected3) + + @pytest.mark.parametrize( + "dropna, expected1, expected2", + [(True, ["foo"], ["foo"]), (False, ["foo"], [np.nan])], + ) + def test_mode_mixeddtype(self, dropna, expected1, expected2): + s = Series([1, "foo", "foo"]) + result = s.mode(dropna) + expected = Series(expected1) + tm.assert_series_equal(result, expected) + + s = Series([1, "foo", "foo", np.nan, np.nan, np.nan]) + result = s.mode(dropna) + expected = Series(expected2, dtype=None if expected2 == ["foo"] else object) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dropna, expected1, expected2", + [ + ( + True, + ["1900-05-03", "2011-01-03", "2013-01-02"], + ["2011-01-03", "2013-01-02"], + ), + (False, [np.nan], [np.nan, "2011-01-03", "2013-01-02"]), + ], + ) + def test_mode_datetime(self, dropna, expected1, expected2): + s = Series( + ["2011-01-03", "2013-01-02", "1900-05-03", "nan", "nan"], dtype="M8[ns]" + ) + result = s.mode(dropna) + expected1 = Series(expected1, dtype="M8[ns]") + tm.assert_series_equal(result, expected1) + + s = Series( + [ + "2011-01-03", + "2013-01-02", + "1900-05-03", + "2011-01-03", + "2013-01-02", + "nan", + "nan", + ], + dtype="M8[ns]", + ) + result = s.mode(dropna) + expected2 = Series(expected2, dtype="M8[ns]") + tm.assert_series_equal(result, expected2) + + @pytest.mark.parametrize( + "dropna, expected1, expected2", + [ + (True, ["-1 days", "0 days", "1 days"], ["2 min", "1 day"]), + (False, [np.nan], [np.nan, "2 min", "1 day"]), + ], + ) + def test_mode_timedelta(self, dropna, expected1, expected2): + # gh-5986: Test timedelta types. 
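+ # mode() preserves the timedelta64[ns] dtype, so the expected string + # values are round-tripped through Series(..., dtype="timedelta64[ns]") + # before the comparison.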
+ + s = Series( + ["1 days", "-1 days", "0 days", "nan", "nan"], dtype="timedelta64[ns]" + ) + result = s.mode(dropna) + expected1 = Series(expected1, dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected1) + + s = Series( + [ + "1 day", + "1 day", + "-1 day", + "-1 day 2 min", + "2 min", + "2 min", + "nan", + "nan", + ], + dtype="timedelta64[ns]", + ) + result = s.mode(dropna) + expected2 = Series(expected2, dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected2) + + @pytest.mark.parametrize( + "dropna, expected1, expected2, expected3", + [ + ( + True, + Categorical([1, 2], categories=[1, 2]), + Categorical(["a"], categories=[1, "a"]), + Categorical([3, 1], categories=[3, 2, 1], ordered=True), + ), + ( + False, + Categorical([np.nan], categories=[1, 2]), + Categorical([np.nan, "a"], categories=[1, "a"]), + Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True), + ), + ], + ) + def test_mode_category(self, dropna, expected1, expected2, expected3): + s = Series(Categorical([1, 2, np.nan, np.nan])) + result = s.mode(dropna) + expected1 = Series(expected1, dtype="category") + tm.assert_series_equal(result, expected1) + + s = Series(Categorical([1, "a", "a", np.nan, np.nan])) + result = s.mode(dropna) + expected2 = Series(expected2, dtype="category") + tm.assert_series_equal(result, expected2) + + s = Series( + Categorical( + [1, 1, 2, 3, 3, np.nan, np.nan], categories=[3, 2, 1], ordered=True + ) + ) + result = s.mode(dropna) + expected3 = Series(expected3, dtype="category") + tm.assert_series_equal(result, expected3) + + @pytest.mark.parametrize( + "dropna, expected1, expected2", + [(True, [2**63], [1, 2**63]), (False, [2**63], [1, 2**63])], + ) + def test_mode_intoverflow(self, dropna, expected1, expected2): + # Test for uint64 overflow. 
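+ # 2**63 exceeds the int64 maximum of 2**63 - 1, so the values must stay + # uint64 end-to-end; a silent cast to int64 would overflow and change + # the mode.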
+ s = Series([1, 2**63, 2**63], dtype=np.uint64) + result = s.mode(dropna) + expected1 = Series(expected1, dtype=np.uint64) + tm.assert_series_equal(result, expected1) + + s = Series([1, 2**63], dtype=np.uint64) + result = s.mode(dropna) + expected2 = Series(expected2, dtype=np.uint64) + tm.assert_series_equal(result, expected2) + + def test_mode_sortwarning(self): + # Check for the warning that is raised when the mode + # results cannot be sorted + + expected = Series(["foo", np.nan]) + s = Series([1, "foo", "foo", np.nan, np.nan]) + + with tm.assert_produces_warning(UserWarning): + result = s.mode(dropna=False) + result = result.sort_values().reset_index(drop=True) + + tm.assert_series_equal(result, expected) + + def test_mode_boolean_with_na(self): + # GH#42107 + ser = Series([True, False, True, pd.NA], dtype="boolean") + result = ser.mode() + expected = Series({0: True}, dtype="boolean") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "array,expected,dtype", + [ + ( + [0, 1j, 1, 1, 1 + 1j, 1 + 2j], + Series([1], dtype=np.complex128), + np.complex128, + ), + ( + [0, 1j, 1, 1, 1 + 1j, 1 + 2j], + Series([1], dtype=np.complex64), + np.complex64, + ), + ( + [1 + 1j, 2j, 1 + 1j], + Series([1 + 1j], dtype=np.complex128), + np.complex128, + ), + ], + ) + def test_single_mode_value_complex(self, array, expected, dtype): + result = Series(array, dtype=dtype).mode() + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "array,expected,dtype", + [ + ( + # no modes + [0, 1j, 1, 1 + 1j, 1 + 2j], + Series([0j, 1j, 1 + 0j, 1 + 1j, 1 + 2j], dtype=np.complex128), + np.complex128, + ), + ( + [1 + 1j, 2j, 1 + 1j, 2j, 3], + Series([2j, 1 + 1j], dtype=np.complex64), + np.complex64, + ), + ], + ) + def test_multimode_complex(self, array, expected, dtype): + # GH 17927 + # mode tries to sort multimodal series. + # Complex numbers are sorted by their magnitude + result = Series(array, dtype=dtype).mode() + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_stat_reductions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_stat_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..8fbb78737474c8abf34b8720603e32f6a93d83e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/reductions/test_stat_reductions.py @@ -0,0 +1,276 @@ +""" +Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ... 
+""" +import inspect + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeLikeStatReductions: + @pytest.mark.parametrize("box", [Series, pd.Index, pd.array]) + def test_dt64_mean(self, tz_naive_fixture, box): + tz = tz_naive_fixture + + dti = date_range("2001-01-01", periods=11, tz=tz) + # shuffle so that we are not just working with monotone-increasing + dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) + dtarr = dti._data + + obj = box(dtarr) + assert obj.mean() == pd.Timestamp("2001-01-06", tz=tz) + assert obj.mean(skipna=False) == pd.Timestamp("2001-01-06", tz=tz) + + # dtarr[-2] will be the first date 2001-01-1 + dtarr[-2] = pd.NaT + + obj = box(dtarr) + assert obj.mean() == pd.Timestamp("2001-01-06 07:12:00", tz=tz) + assert obj.mean(skipna=False) is pd.NaT + + @pytest.mark.parametrize("box", [Series, pd.Index, pd.array]) + @pytest.mark.parametrize("freq", ["s", "h", "D", "W", "B"]) + def test_period_mean(self, box, freq): + # GH#24757 + dti = date_range("2001-01-01", periods=11) + # shuffle so that we are not just working with monotone-increasing + dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) + + warn = FutureWarning if freq == "B" else None + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(warn, match=msg): + parr = dti._data.to_period(freq) + obj = box(parr) + with pytest.raises(TypeError, match="ambiguous"): + obj.mean() + with pytest.raises(TypeError, match="ambiguous"): + obj.mean(skipna=True) + + # parr[-2] will be the first date 2001-01-1 + parr[-2] = pd.NaT + + with pytest.raises(TypeError, match="ambiguous"): + obj.mean() + with pytest.raises(TypeError, match="ambiguous"): + obj.mean(skipna=True) + + @pytest.mark.parametrize("box", [Series, pd.Index, pd.array]) + def test_td64_mean(self, box): + m8values = np.array([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], "m8[D]") + tdi = pd.TimedeltaIndex(m8values).as_unit("ns") + + tdarr = tdi._data + obj = box(tdarr, copy=False) + + result = obj.mean() + expected = np.array(tdarr).mean() + assert result == expected + + tdarr[0] = pd.NaT + assert obj.mean(skipna=False) is pd.NaT + + result2 = obj.mean(skipna=True) + assert result2 == tdi[1:].mean() + + # exact equality fails by 1 nanosecond + assert result2.round("us") == (result * 11.0 / 10).round("us") + + +class TestSeriesStatReductions: + # Note: the name TestSeriesStatReductions indicates these tests + # were moved from a series-specific test file, _not_ that these tests are + # intended long-term to be series-specific + + def _check_stat_op( + self, name, alternate, string_series_, check_objects=False, check_allna=False + ): + with pd.option_context("use_bottleneck", False): + f = getattr(Series, name) + + # add some NaNs + string_series_[5:15] = np.nan + + # mean, idxmax, idxmin, min, and max are valid for dates + if name not in ["max", "min", "mean", "median", "std"]: + ds = Series(date_range("1/1/2001", periods=10)) + msg = f"does not support reduction '{name}'" + with pytest.raises(TypeError, match=msg): + f(ds) + + # skipna or no + assert pd.notna(f(string_series_)) + assert pd.isna(f(string_series_, skipna=False)) + + # check the result is correct + nona = string_series_.dropna() + tm.assert_almost_equal(f(nona), alternate(nona.values)) + tm.assert_almost_equal(f(string_series_), alternate(nona.values)) + + allna = string_series_ * np.nan + + if check_allna: + assert np.isnan(f(allna)) + + # dtype=object with None, it works! 
+ s = Series([1, 2, 3, None, 5]) + f(s) + + # GH#2888 + items = [0] + items.extend(range(2**40, 2**40 + 1000)) + s = Series(items, dtype="int64") + tm.assert_almost_equal(float(f(s)), float(alternate(s.values))) + + # check date range + if check_objects: + s = Series(pd.bdate_range("1/1/2000", periods=10)) + res = f(s) + exp = alternate(s) + assert res == exp + + # check on string data + if name not in ["sum", "min", "max"]: + with pytest.raises(TypeError, match=None): + f(Series(list("abc"))) + + # Invalid axis. + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + f(string_series_, axis=1) + + if "numeric_only" in inspect.getfullargspec(f).args: + # only the index is string; dtype is float + f(string_series_, numeric_only=True) + + def test_sum(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("sum", np.sum, string_series, check_allna=False) + + def test_mean(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("mean", np.mean, string_series) + + def test_median(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("median", np.median, string_series) + + # test with integers, test failure + int_ts = Series(np.ones(10, dtype=int), index=range(10)) + tm.assert_almost_equal(np.median(int_ts), int_ts.median()) + + def test_prod(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("prod", np.prod, string_series) + + def test_min(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("min", np.min, string_series, check_objects=True) + + def test_max(self): + string_series = Series(range(20), dtype=np.float64, name="series") + self._check_stat_op("max", np.max, string_series, check_objects=True) + + def test_var_std(self): + string_series = Series(range(20), dtype=np.float64, name="series") + datetime_series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + alt = lambda x: np.std(x, ddof=1) + self._check_stat_op("std", alt, string_series) + + alt = lambda x: np.var(x, ddof=1) + self._check_stat_op("var", alt, string_series) + + result = datetime_series.std(ddof=4) + expected = np.std(datetime_series.values, ddof=4) + tm.assert_almost_equal(result, expected) + + result = datetime_series.var(ddof=4) + expected = np.var(datetime_series.values, ddof=4) + tm.assert_almost_equal(result, expected) + + # 1 - element series with ddof=1 + s = datetime_series.iloc[[0]] + result = s.var(ddof=1) + assert pd.isna(result) + + result = s.std(ddof=1) + assert pd.isna(result) + + def test_sem(self): + string_series = Series(range(20), dtype=np.float64, name="series") + datetime_series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) + self._check_stat_op("sem", alt, string_series) + + result = datetime_series.sem(ddof=4) + expected = np.std(datetime_series.values, ddof=4) / np.sqrt( + len(datetime_series.values) + ) + tm.assert_almost_equal(result, expected) + + # 1 - element series with ddof=1 + s = datetime_series.iloc[[0]] + result = s.sem(ddof=1) + assert pd.isna(result) + + def test_skew(self): + sp_stats = pytest.importorskip("scipy.stats") + + string_series = Series(range(20), dtype=np.float64, name="series") + + alt = lambda x: sp_stats.skew(x, bias=False) + 
self._check_stat_op("skew", alt, string_series) + + # test corner cases, skew() returns NaN unless there's at least 3 + # values + min_N = 3 + for i in range(1, min_N + 1): + s = Series(np.ones(i)) + df = DataFrame(np.ones((i, i))) + if i < min_N: + assert np.isnan(s.skew()) + assert np.isnan(df.skew()).all() + else: + assert 0 == s.skew() + assert isinstance(s.skew(), np.float64) # GH53482 + assert (df.skew() == 0).all() + + def test_kurt(self): + sp_stats = pytest.importorskip("scipy.stats") + + string_series = Series(range(20), dtype=np.float64, name="series") + + alt = lambda x: sp_stats.kurtosis(x, bias=False) + self._check_stat_op("kurt", alt, string_series) + + def test_kurt_corner(self): + # test corner cases, kurt() returns NaN unless there's at least 4 + # values + min_N = 4 + for i in range(1, min_N + 1): + s = Series(np.ones(i)) + df = DataFrame(np.ones((i, i))) + if i < min_N: + assert np.isnan(s.kurt()) + assert np.isnan(df.kurt()).all() + else: + assert 0 == s.kurt() + assert isinstance(s.kurt(), np.float64) # GH53482 + assert (df.kurt() == 0).all()