`` or ``<th>``
+    element in the table. ``<td>`` stands for "table data". This function
+    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
+    If the function has a ``<thead>`` argument, it is used to construct
+    the header, otherwise the function attempts to find the header within
+    the body (by putting rows with only ``<th>`` elements into the header).
+
+ Similar to :func:`~read_csv` the `header` argument is applied
+ **after** `skiprows` is applied.
+
+ This function will *always* return a list of :class:`DataFrame` *or*
+ it will fail, e.g., it will *not* return an empty list.
+
+ Examples
+ --------
+ See the :ref:`read_html documentation in the IO section of the docs
+    <io.read_html>` for some examples of reading in HTML tables.
+ """
+ # Type check here. We don't want to parse only to fail because of an
+ # invalid value of an integer skiprows.
+ if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+ raise ValueError(
+ "cannot skip rows starting from the end of the "
+ "data (you passed a negative value)"
+ )
+ if extract_links not in [None, "header", "footer", "body", "all"]:
+ raise ValueError(
+ "`extract_links` must be one of "
+ '{None, "header", "footer", "body", "all"}, got '
+ f'"{extract_links}"'
+ )
+
+ validate_header_arg(header)
+ check_dtype_backend(dtype_backend)
+
+ io = stringify_path(io)
+
+ if isinstance(io, str) and not any(
+ [
+ is_file_like(io),
+ file_exists(io),
+ is_url(io),
+ is_fsspec_url(io),
+ ]
+ ):
+ warnings.warn(
+ "Passing literal html to 'read_html' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ return _parse(
+ flavor=flavor,
+ io=io,
+ match=match,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ parse_dates=parse_dates,
+ thousands=thousands,
+ attrs=attrs,
+ encoding=encoding,
+ decimal=decimal,
+ converters=converters,
+ na_values=na_values,
+ keep_default_na=keep_default_na,
+ displayed_only=displayed_only,
+ extract_links=extract_links,
+ dtype_backend=dtype_backend,
+ storage_options=storage_options,
+ )
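+
+# Usage sketch (illustrative; not part of the pandas sources): given the
+# deprecation warning above, literal HTML should be wrapped in StringIO before
+# being passed to read_html. The markup and variable names are placeholders,
+# and an HTML parser such as lxml must be installed.
+#
+#   from io import StringIO
+#   import pandas as pd
+#
+#   html = "<table><tr><th>a</th></tr><tr><td>1</td></tr></table>"
+#   (df,) = pd.read_html(StringIO(html))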
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..9570d6f8b26bd85585e5a46145b7871f1bb6eb3a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py
@@ -0,0 +1,676 @@
+""" parquet compat """
+from __future__ import annotations
+
+import io
+import json
+import os
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Literal,
+)
+import warnings
+from warnings import catch_warnings
+
+from pandas._config import using_pyarrow_string_dtype
+from pandas._config.config import _get_option
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ get_option,
+)
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io._util import arrow_string_types_mapper
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ is_fsspec_url,
+ is_url,
+ stringify_path,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ DtypeBackend,
+ FilePath,
+ ReadBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+
+def get_engine(engine: str) -> BaseImpl:
+ """return our implementation"""
+ if engine == "auto":
+ engine = get_option("io.parquet.engine")
+
+ if engine == "auto":
+ # try engines in this order
+ engine_classes = [PyArrowImpl, FastParquetImpl]
+
+ error_msgs = ""
+ for engine_class in engine_classes:
+ try:
+ return engine_class()
+ except ImportError as err:
+ error_msgs += "\n - " + str(err)
+
+ raise ImportError(
+ "Unable to find a usable engine; "
+ "tried using: 'pyarrow', 'fastparquet'.\n"
+ "A suitable version of "
+ "pyarrow or fastparquet is required for parquet "
+ "support.\n"
+ "Trying to import the above resulted in these errors:"
+ f"{error_msgs}"
+ )
+
+ if engine == "pyarrow":
+ return PyArrowImpl()
+ elif engine == "fastparquet":
+ return FastParquetImpl()
+
+ raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
+
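+# Usage sketch (illustrative; not part of the pandas sources): the "auto"
+# resolution above is driven by the "io.parquet.engine" option, so with
+# pyarrow installed the following selects PyArrowImpl. The path and data are
+# placeholders.
+#
+#   import pandas as pd
+#   pd.set_option("io.parquet.engine", "pyarrow")
+#   pd.DataFrame({"a": [1]}).to_parquet("out.parquet")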
+
+def _get_path_or_handle(
+ path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
+ fs: Any,
+ storage_options: StorageOptions | None = None,
+ mode: str = "rb",
+ is_dir: bool = False,
+) -> tuple[
+ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
+]:
+ """File handling for PyArrow."""
+ path_or_handle = stringify_path(path)
+ if fs is not None:
+ pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore")
+ fsspec = import_optional_dependency("fsspec", errors="ignore")
+ if pa_fs is not None and isinstance(fs, pa_fs.FileSystem):
+ if storage_options:
+ raise NotImplementedError(
+ "storage_options not supported with a pyarrow FileSystem."
+ )
+ elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem):
+ pass
+ else:
+ raise ValueError(
+ f"filesystem must be a pyarrow or fsspec FileSystem, "
+ f"not a {type(fs).__name__}"
+ )
+ if is_fsspec_url(path_or_handle) and fs is None:
+ if storage_options is None:
+ pa = import_optional_dependency("pyarrow")
+ pa_fs = import_optional_dependency("pyarrow.fs")
+
+ try:
+ fs, path_or_handle = pa_fs.FileSystem.from_uri(path)
+ except (TypeError, pa.ArrowInvalid):
+ pass
+ if fs is None:
+ fsspec = import_optional_dependency("fsspec")
+ fs, path_or_handle = fsspec.core.url_to_fs(
+ path_or_handle, **(storage_options or {})
+ )
+ elif storage_options and (not is_url(path_or_handle) or mode != "rb"):
+ # can't write to a remote url
+ # without making use of fsspec at the moment
+ raise ValueError("storage_options passed with buffer, or non-supported URL")
+
+ handles = None
+ if (
+ not fs
+ and not is_dir
+ and isinstance(path_or_handle, str)
+ and not os.path.isdir(path_or_handle)
+ ):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(
+ path_or_handle, mode, is_text=False, storage_options=storage_options
+ )
+ fs = None
+ path_or_handle = handles.handle
+ return path_or_handle, handles, fs
+
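+# Behavior sketch (illustrative; not part of the pandas sources): for a plain
+# local file path with no filesystem, the helper above opens a buffered handle
+# through get_handle, while for an fsspec URL it resolves the filesystem and
+# returns the stripped path instead. The file name is a placeholder.
+#
+#   path_or_handle, handles, fs = _get_path_or_handle("data.parquet", None)
+#   # -> (open binary handle, IOHandles wrapper, None); call handles.close()
+#   #    when finished with the handle.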
+
+class BaseImpl:
+ @staticmethod
+ def validate_dataframe(df: DataFrame) -> None:
+ if not isinstance(df, DataFrame):
+ raise ValueError("to_parquet only supports IO with DataFrames")
+
+ def write(self, df: DataFrame, path, compression, **kwargs):
+ raise AbstractMethodError(self)
+
+ def read(self, path, columns=None, **kwargs) -> DataFrame:
+ raise AbstractMethodError(self)
+
+
+class PyArrowImpl(BaseImpl):
+ def __init__(self) -> None:
+ import_optional_dependency(
+ "pyarrow", extra="pyarrow is required for parquet support."
+ )
+ import pyarrow.parquet
+
+ # import utils to register the pyarrow extension types
+ import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401
+
+ self.api = pyarrow
+
+ def write(
+ self,
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes],
+ compression: str | None = "snappy",
+ index: bool | None = None,
+ storage_options: StorageOptions | None = None,
+ partition_cols: list[str] | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> None:
+ self.validate_dataframe(df)
+
+ from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}
+ if index is not None:
+ from_pandas_kwargs["preserve_index"] = index
+
+ table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
+
+ if df.attrs:
+ df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)}
+ existing_metadata = table.schema.metadata
+ merged_metadata = {**existing_metadata, **df_metadata}
+ table = table.replace_schema_metadata(merged_metadata)
+
+ path_or_handle, handles, filesystem = _get_path_or_handle(
+ path,
+ filesystem,
+ storage_options=storage_options,
+ mode="wb",
+ is_dir=partition_cols is not None,
+ )
+ if (
+ isinstance(path_or_handle, io.BufferedWriter)
+ and hasattr(path_or_handle, "name")
+ and isinstance(path_or_handle.name, (str, bytes))
+ ):
+ if isinstance(path_or_handle.name, bytes):
+ path_or_handle = path_or_handle.name.decode()
+ else:
+ path_or_handle = path_or_handle.name
+
+ try:
+ if partition_cols is not None:
+ # writes to multiple files under the given path
+ self.api.parquet.write_to_dataset(
+ table,
+ path_or_handle,
+ compression=compression,
+ partition_cols=partition_cols,
+ filesystem=filesystem,
+ **kwargs,
+ )
+ else:
+ # write to single output file
+ self.api.parquet.write_table(
+ table,
+ path_or_handle,
+ compression=compression,
+ filesystem=filesystem,
+ **kwargs,
+ )
+ finally:
+ if handles is not None:
+ handles.close()
+
+ def read(
+ self,
+ path,
+ columns=None,
+ filters=None,
+ use_nullable_dtypes: bool = False,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> DataFrame:
+ kwargs["use_pandas_metadata"] = True
+
+ to_pandas_kwargs = {}
+ if dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping()
+ to_pandas_kwargs["types_mapper"] = mapping.get
+ elif dtype_backend == "pyarrow":
+ to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment]
+ elif using_pyarrow_string_dtype():
+ to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper()
+
+ manager = _get_option("mode.data_manager", silent=True)
+ if manager == "array":
+ to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment]
+
+ path_or_handle, handles, filesystem = _get_path_or_handle(
+ path,
+ filesystem,
+ storage_options=storage_options,
+ mode="rb",
+ )
+ try:
+ pa_table = self.api.parquet.read_table(
+ path_or_handle,
+ columns=columns,
+ filesystem=filesystem,
+ filters=filters,
+ **kwargs,
+ )
+ result = pa_table.to_pandas(**to_pandas_kwargs)
+
+ if manager == "array":
+ result = result._as_manager("array", copy=False)
+
+ if pa_table.schema.metadata:
+ if b"PANDAS_ATTRS" in pa_table.schema.metadata:
+ df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"]
+ result.attrs = json.loads(df_metadata)
+ return result
+ finally:
+ if handles is not None:
+ handles.close()
+
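+    # Round-trip sketch (illustrative; not part of the pandas sources):
+    # df.attrs survive a write()/read() cycle through the "PANDAS_ATTRS"
+    # schema-metadata key handled above, assuming pyarrow is installed and the
+    # placeholder path is writable.
+    #
+    #   df = pd.DataFrame({"x": [1]})
+    #   df.attrs = {"source": "sensor-1"}
+    #   df.to_parquet("out.parquet")
+    #   pd.read_parquet("out.parquet").attrs  # {'source': 'sensor-1'}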
+
+class FastParquetImpl(BaseImpl):
+ def __init__(self) -> None:
+ # since pandas is a dependency of fastparquet
+ # we need to import on first use
+ fastparquet = import_optional_dependency(
+ "fastparquet", extra="fastparquet is required for parquet support."
+ )
+ self.api = fastparquet
+
+ def write(
+ self,
+ df: DataFrame,
+ path,
+ compression: Literal["snappy", "gzip", "brotli"] | None = "snappy",
+ index=None,
+ partition_cols=None,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> None:
+ self.validate_dataframe(df)
+
+ if "partition_on" in kwargs and partition_cols is not None:
+ raise ValueError(
+ "Cannot use both partition_on and "
+ "partition_cols. Use partition_cols for partitioning data"
+ )
+ if "partition_on" in kwargs:
+ partition_cols = kwargs.pop("partition_on")
+
+ if partition_cols is not None:
+ kwargs["file_scheme"] = "hive"
+
+ if filesystem is not None:
+ raise NotImplementedError(
+ "filesystem is not implemented for the fastparquet engine."
+ )
+
+ # cannot use get_handle as write() does not accept file buffers
+ path = stringify_path(path)
+ if is_fsspec_url(path):
+ fsspec = import_optional_dependency("fsspec")
+
+ # if filesystem is provided by fsspec, file must be opened in 'wb' mode.
+ kwargs["open_with"] = lambda path, _: fsspec.open(
+ path, "wb", **(storage_options or {})
+ ).open()
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
+
+ with catch_warnings(record=True):
+ self.api.write(
+ path,
+ df,
+ compression=compression,
+ write_index=index,
+ partition_on=partition_cols,
+ **kwargs,
+ )
+
+ def read(
+ self,
+ path,
+ columns=None,
+ filters=None,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> DataFrame:
+ parquet_kwargs: dict[str, Any] = {}
+ use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False)
+ dtype_backend = kwargs.pop("dtype_backend", lib.no_default)
+ # We are disabling nullable dtypes for fastparquet pending discussion
+ parquet_kwargs["pandas_nulls"] = False
+ if use_nullable_dtypes:
+ raise ValueError(
+ "The 'use_nullable_dtypes' argument is not supported for the "
+ "fastparquet engine"
+ )
+ if dtype_backend is not lib.no_default:
+ raise ValueError(
+ "The 'dtype_backend' argument is not supported for the "
+ "fastparquet engine"
+ )
+ if filesystem is not None:
+ raise NotImplementedError(
+ "filesystem is not implemented for the fastparquet engine."
+ )
+ path = stringify_path(path)
+ handles = None
+ if is_fsspec_url(path):
+ fsspec = import_optional_dependency("fsspec")
+
+ parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs
+ elif isinstance(path, str) and not os.path.isdir(path):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(
+ path, "rb", is_text=False, storage_options=storage_options
+ )
+ path = handles.handle
+
+ try:
+ parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
+ return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs)
+ finally:
+ if handles is not None:
+ handles.close()
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def to_parquet(
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes] | None = None,
+ engine: str = "auto",
+ compression: str | None = "snappy",
+ index: bool | None = None,
+ storage_options: StorageOptions | None = None,
+ partition_cols: list[str] | None = None,
+ filesystem: Any = None,
+ **kwargs,
+) -> bytes | None:
+ """
+ Write a DataFrame to the parquet format.
+
+ Parameters
+ ----------
+ df : DataFrame
+ path : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function. If None, the result is
+ returned as bytes. If a string, it will be used as Root Directory path
+ when writing a partitioned dataset. The engine fastparquet does not
+ accept file-like objects.
+ engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
+
+ When using the ``'pyarrow'`` engine and no storage options are provided
+ and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
+ (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
+ Use the filesystem keyword with an instantiated fsspec filesystem
+ if you wish to use its implementation.
+ compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},
+ default 'snappy'. Name of the compression to use. Use ``None``
+ for no compression.
+ index : bool, default None
+ If ``True``, include the dataframe's index(es) in the file output. If
+ ``False``, they will not be written to the file.
+ If ``None``, similar to ``True`` the dataframe's index(es)
+ will be saved. However, instead of being saved as values,
+ the RangeIndex will be stored as a range in the metadata so it
+ doesn't require much space and is faster. Other indexes will
+ be included as columns in the file output.
+ partition_cols : str or list, optional, default None
+ Column names by which to partition the dataset.
+ Columns are partitioned in the order they are given.
+ Must be None if path is not a string.
+ {storage_options}
+
+ filesystem : fsspec or pyarrow filesystem, default None
+ Filesystem object to use when reading the parquet file. Only implemented
+ for ``engine="pyarrow"``.
+
+ .. versionadded:: 2.1.0
+
+ kwargs
+ Additional keyword arguments passed to the engine
+
+ Returns
+ -------
+ bytes if no path argument is provided else None
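+
+    Examples
+    --------
+    A minimal sketch; the path and data are illustrative and a parquet engine
+    such as pyarrow is assumed to be installed. When ``path`` is None the
+    parquet bytes are returned instead of written.
+
+    >>> df = pd.DataFrame({{"a": [1, 2]}})  # doctest: +SKIP
+    >>> to_parquet(df, "out.parquet")  # doctest: +SKIP
+    >>> raw = to_parquet(df)  # doctest: +SKIP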
+ """
+ if isinstance(partition_cols, str):
+ partition_cols = [partition_cols]
+ impl = get_engine(engine)
+
+ path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
+
+ impl.write(
+ df,
+ path_or_buf,
+ compression=compression,
+ index=index,
+ partition_cols=partition_cols,
+ storage_options=storage_options,
+ filesystem=filesystem,
+ **kwargs,
+ )
+
+ if path is None:
+ assert isinstance(path_or_buf, io.BytesIO)
+ return path_or_buf.getvalue()
+ else:
+ return None
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def read_parquet(
+ path: FilePath | ReadBuffer[bytes],
+ engine: str = "auto",
+ columns: list[str] | None = None,
+ storage_options: StorageOptions | None = None,
+ use_nullable_dtypes: bool | lib.NoDefault = lib.no_default,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ filesystem: Any = None,
+ filters: list[tuple] | list[list[tuple]] | None = None,
+ **kwargs,
+) -> DataFrame:
+ """
+ Load a parquet object from the file path, returning a DataFrame.
+
+ Parameters
+ ----------
+ path : str, path object or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function.
+ The string could be a URL. Valid URL schemes include http, ftp, s3,
+ gs, and file. For file URLs, a host is expected. A local file could be:
+ ``file://localhost/path/to/table.parquet``.
+ A file URL can also be a path to a directory that contains multiple
+ partitioned parquet files. Both pyarrow and fastparquet support
+ paths to directories as well as file URLs. A directory path could be:
+ ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
+ engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
+
+ When using the ``'pyarrow'`` engine and no storage options are provided
+ and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
+ (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
+ Use the filesystem keyword with an instantiated fsspec filesystem
+ if you wish to use its implementation.
+ columns : list, default=None
+ If not None, only these columns will be read from the file.
+ {storage_options}
+
+ .. versionadded:: 1.3.0
+
+ use_nullable_dtypes : bool, default False
+ If True, use dtypes that use ``pd.NA`` as missing value indicator
+ for the resulting DataFrame. (only applicable for the ``pyarrow``
+ engine)
+ As new dtypes are added that support ``pd.NA`` in the future, the
+ output with this option will change to use those dtypes.
+ Note: this is an experimental option, and behaviour (e.g. additional
+ support dtypes) may change without notice.
+
+ .. deprecated:: 2.0
+
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ filesystem : fsspec or pyarrow filesystem, default None
+ Filesystem object to use when reading the parquet file. Only implemented
+ for ``engine="pyarrow"``.
+
+ .. versionadded:: 2.1.0
+
+ filters : List[Tuple] or List[List[Tuple]], default None
+ To filter out data.
+ Filter syntax: [[(column, op, val), ...],...]
+ where op is [==, =, >, >=, <, <=, !=, in, not in]
+ The innermost tuples are transposed into a set of filters applied
+ through an `AND` operation.
+ The outer list combines these sets of filters through an `OR`
+ operation.
+ A single list of tuples can also be used, meaning that no `OR`
+ operation between set of filters is to be conducted.
+
+ Using this argument will NOT result in row-wise filtering of the final
+ partitions unless ``engine="pyarrow"`` is also specified. For
+ other engines, filtering is only performed at the partition level, that is,
+ to prevent the loading of some row-groups and/or files.
+
+ .. versionadded:: 2.1.0
+
+ **kwargs
+ Any additional kwargs are passed to the engine.
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... )
+ >>> original_df
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> df_parquet_bytes = original_df.to_parquet()
+ >>> from io import BytesIO
+ >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
+ >>> restored_df
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> restored_df.equals(original_df)
+ True
+ >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
+ >>> restored_bar
+ bar
+ 0 5
+ 1 6
+ 2 7
+ 3 8
+ 4 9
+ >>> restored_bar.equals(original_df[['bar']])
+ True
+
+ The function uses `kwargs` that are passed directly to the engine.
+ In the following example, we use the `filters` argument of the pyarrow
+ engine to filter the rows of the DataFrame.
+
+ Since `pyarrow` is the default engine, we can omit the `engine` argument.
+ Note that the `filters` argument is implemented by the `pyarrow` engine,
+ which can benefit from multithreading and also potentially be more
+ economical in terms of memory.
+
+ >>> sel = [("foo", ">", 2)]
+ >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
+ >>> restored_part
+ foo bar
+ 0 3 8
+ 1 4 9
+ """
+
+ impl = get_engine(engine)
+
+ if use_nullable_dtypes is not lib.no_default:
+ msg = (
+ "The argument 'use_nullable_dtypes' is deprecated and will be removed "
+ "in a future version."
+ )
+ if use_nullable_dtypes is True:
+ msg += (
+                " Use dtype_backend='numpy_nullable' instead of "
+                "use_nullable_dtypes=True."
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+ else:
+ use_nullable_dtypes = False
+ check_dtype_backend(dtype_backend)
+
+ return impl.read(
+ path,
+ columns=columns,
+ filters=filters,
+ storage_options=storage_options,
+ use_nullable_dtypes=use_nullable_dtypes,
+ dtype_backend=dtype_backend,
+ filesystem=filesystem,
+ **kwargs,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dae0e7106b69a471f0c2702158cfe0f11f0389c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py
@@ -0,0 +1,210 @@
+""" pickle compat """
+from __future__ import annotations
+
+import pickle
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+import warnings
+
+from pandas.compat import pickle_compat as pc
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import get_handle
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadPickleBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas import (
+ DataFrame,
+ Series,
+ )
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
+)
+def to_pickle(
+ obj: Any,
+ filepath_or_buffer: FilePath | WriteBuffer[bytes],
+ compression: CompressionOptions = "infer",
+ protocol: int = pickle.HIGHEST_PROTOCOL,
+ storage_options: StorageOptions | None = None,
+) -> None:
+ """
+ Pickle (serialize) object to file.
+
+ Parameters
+ ----------
+ obj : any object
+ Any python object.
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function.
+ Also accepts URL. URL has to be of S3 or GCS.
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ protocol : int
+ Int which indicates which protocol should be used by the pickler,
+ default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
+ values for this parameter depend on the version of Python. For Python
+ 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.
+ For Python >= 3.4, 4 is a valid value. A negative value for the
+ protocol parameter is equivalent to setting its value to
+ HIGHEST_PROTOCOL.
+
+ {storage_options}
+
+ .. [1] https://docs.python.org/3/library/pickle.html
+
+ See Also
+ --------
+ read_pickle : Load pickled pandas object (or any object) from file.
+ DataFrame.to_hdf : Write DataFrame to an HDF5 file.
+ DataFrame.to_sql : Write DataFrame to a SQL database.
+ DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
+ >>> original_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
+
+ >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
+ >>> unpickled_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ """ # noqa: E501
+ if protocol < 0:
+ protocol = pickle.HIGHEST_PROTOCOL
+
+ with get_handle(
+ filepath_or_buffer,
+ "wb",
+ compression=compression,
+ is_text=False,
+ storage_options=storage_options,
+ ) as handles:
+ # letting pickle write directly to the buffer is more memory-efficient
+ pickle.dump(obj, handles.handle, protocol=protocol)
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer",
+)
+def read_pickle(
+ filepath_or_buffer: FilePath | ReadPickleBuffer,
+ compression: CompressionOptions = "infer",
+ storage_options: StorageOptions | None = None,
+) -> DataFrame | Series:
+ """
+ Load pickled pandas object (or any object) from file.
+
+ .. warning::
+
+ Loading pickled data received from untrusted sources can be
+       unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``readlines()`` function.
+ Also accepts URL. URL is not limited to S3 and GCS.
+
+ {decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ {storage_options}
+
+ Returns
+ -------
+ same type as object stored in file
+
+ See Also
+ --------
+ DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
+ Series.to_pickle : Pickle (serialize) Series object to file.
+ read_hdf : Read HDF5 file into a DataFrame.
+ read_sql : Read SQL query or database table into a DataFrame.
+ read_parquet : Load a parquet object, returning a DataFrame.
+
+ Notes
+ -----
+ read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3
+ provided the object was serialized with to_pickle.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... ) # doctest: +SKIP
+ >>> original_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
+
+ >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
+ >>> unpickled_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ """
+ excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
+ with get_handle(
+ filepath_or_buffer,
+ "rb",
+ compression=compression,
+ is_text=False,
+ storage_options=storage_options,
+ ) as handles:
+ # 1) try standard library Pickle
+ # 2) try pickle_compat (older pandas version) to handle subclass changes
+ # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
+
+ try:
+ # TypeError for Cython complaints about object.__new__ vs Tick.__new__
+ try:
+ with warnings.catch_warnings(record=True):
+ # We want to silence any warnings about, e.g. moved modules.
+ warnings.simplefilter("ignore", Warning)
+ return pickle.load(handles.handle)
+ except excs_to_catch:
+ # e.g.
+ # "No module named 'pandas.core.sparse.series'"
+                #  "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
+                return pc.load(handles.handle, encoding=None)
+        except UnicodeDecodeError:
+            # e.g. can occur for files written in py27; see GH#28645 and GH#31988
+            return pc.load(handles.handle, encoding="latin-1")
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
new file mode 100644
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
+"""
+Read SAS7BDAT files
+
+Based on code written by Jared Hobbs:
+  https://bitbucket.org/jaredhobbs/sas7bdat
+
+See also:
+  https://github.com/BioStatMatt/sas7bdat
+
+Partial documentation of the file format:
+  https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import datetime
+import sys
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas._libs.byteswap import (
+    read_double_with_byteswap,
+    read_float_with_byteswap,
+    read_uint16_with_byteswap,
+    read_uint32_with_byteswap,
+    read_uint64_with_byteswap,
+)
+from pandas._libs.sas import (
+    Parser,
+    get_subheader_index,
+)
+from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
+from pandas.errors import EmptyDataError
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Timestamp,
+)
+
+from pandas.io.common import get_handle
+import pandas.io.sas.sas_constants as const
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        CompressionOptions,
+        FilePath,
+        ReadBuffer,
+    )
+
+
+_unix_origin = Timestamp("1970-01-01")
+_sas_origin = Timestamp("1960-01-01")
+
+
+def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
+ """
+ Convert to Timestamp if possible, otherwise to datetime.datetime.
+ SAS float64 lacks precision for more than ms resolution so the fit
+ to datetime.datetime is ok.
+
+ Parameters
+ ----------
+ sas_datetimes : {Series, Sequence[float]}
+ Dates or datetimes in SAS
+ unit : {'d', 's'}
+ "d" if the floats represent dates, "s" for datetimes
+
+ Returns
+ -------
+ Series
+ Series of datetime64 dtype or datetime.datetime.
+ """
+ td = (_sas_origin - _unix_origin).as_unit("s")
+ if unit == "s":
+ millis = cast_from_unit_vectorized(
+ sas_datetimes._values, unit="s", out_unit="ms"
+ )
+ dt64ms = millis.view("M8[ms]") + td
+ return pd.Series(dt64ms, index=sas_datetimes.index, copy=False)
+ else:
+ vals = np.array(sas_datetimes, dtype="M8[D]") + td
+ return pd.Series(vals, dtype="M8[s]", index=sas_datetimes.index, copy=False)
+
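+# Illustrative check of the epoch handling above (not from the pandas test
+# suite): SAS day 0 is 1960-01-01 and 1960 has 366 days, so day 366 falls on
+# 1961-01-01.
+#
+#   import pandas as pd
+#   _convert_datetimes(pd.Series([0.0, 366.0]), "d")
+#   # 0   1960-01-01
+#   # 1   1961-01-01
+#   # dtype: datetime64[s]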
+
+class _Column:
+ col_id: int
+ name: str | bytes
+ label: str | bytes
+ format: str | bytes
+ ctype: bytes
+ length: int
+
+ def __init__(
+ self,
+ col_id: int,
+ # These can be bytes when convert_header_text is False
+ name: str | bytes,
+ label: str | bytes,
+ format: str | bytes,
+ ctype: bytes,
+ length: int,
+ ) -> None:
+ self.col_id = col_id
+ self.name = name
+ self.label = label
+ self.format = format
+ self.ctype = ctype
+ self.length = length
+
+
+# SAS7BDAT represents a SAS data file in SAS7BDAT format.
+class SAS7BDATReader(ReaderBase, abc.Iterator):
+ """
+ Read SAS files in SAS7BDAT format.
+
+ Parameters
+ ----------
+ path_or_buf : path name or buffer
+ Name of SAS file or file-like object pointing to SAS file
+ contents.
+ index : column identifier, defaults to None
+ Column to use as index.
+ convert_dates : bool, defaults to True
+ Attempt to convert dates to Pandas datetime values. Note that
+ some rarely used SAS date formats may be unsupported.
+ blank_missing : bool, defaults to True
+ Convert empty strings to missing values (SAS uses blanks to
+ indicate missing character variables).
+ chunksize : int, defaults to None
+ Return SAS7BDATReader object for iterations, returns chunks
+ with given number of lines.
+ encoding : str, 'infer', defaults to None
+ String encoding acc. to Python standard encodings,
+ encoding='infer' tries to detect the encoding from the file header,
+ encoding=None will leave the data in binary format.
+ convert_text : bool, defaults to True
+ If False, text variables are left as raw bytes.
+ convert_header_text : bool, defaults to True
+ If False, header text, including column names, are left as raw
+ bytes.
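+
+    Examples
+    --------
+    Instances are usually created through :func:`pandas.read_sas`; the file
+    name below is a placeholder.
+
+    >>> df = pd.read_sas("example.sas7bdat")  # doctest: +SKIP
+    >>> rdr = pd.read_sas("example.sas7bdat", chunksize=1000)  # doctest: +SKIP
+    >>> chunks = list(rdr)  # doctest: +SKIP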
+ """
+
+ _int_length: int
+ _cached_page: bytes | None
+
+ def __init__(
+ self,
+ path_or_buf: FilePath | ReadBuffer[bytes],
+ index=None,
+ convert_dates: bool = True,
+ blank_missing: bool = True,
+ chunksize: int | None = None,
+ encoding: str | None = None,
+ convert_text: bool = True,
+ convert_header_text: bool = True,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self.index = index
+ self.convert_dates = convert_dates
+ self.blank_missing = blank_missing
+ self.chunksize = chunksize
+ self.encoding = encoding
+ self.convert_text = convert_text
+ self.convert_header_text = convert_header_text
+
+ self.default_encoding = "latin-1"
+ self.compression = b""
+ self.column_names_raw: list[bytes] = []
+ self.column_names: list[str | bytes] = []
+ self.column_formats: list[str | bytes] = []
+ self.columns: list[_Column] = []
+
+ self._current_page_data_subheader_pointers: list[tuple[int, int]] = []
+ self._cached_page = None
+ self._column_data_lengths: list[int] = []
+ self._column_data_offsets: list[int] = []
+ self._column_types: list[bytes] = []
+
+ self._current_row_in_file_index = 0
+ self._current_row_on_page_index = 0
+ self._current_row_in_file_index = 0
+
+ self.handles = get_handle(
+ path_or_buf, "rb", is_text=False, compression=compression
+ )
+
+ self._path_or_buf = self.handles.handle
+
+ # Same order as const.SASIndex
+ self._subheader_processors = [
+ self._process_rowsize_subheader,
+ self._process_columnsize_subheader,
+ self._process_subheader_counts,
+ self._process_columntext_subheader,
+ self._process_columnname_subheader,
+ self._process_columnattributes_subheader,
+ self._process_format_subheader,
+ self._process_columnlist_subheader,
+ None, # Data
+ ]
+
+ try:
+ self._get_properties()
+ self._parse_metadata()
+ except Exception:
+ self.close()
+ raise
+
+ def column_data_lengths(self) -> np.ndarray:
+ """Return a numpy int64 array of the column data lengths"""
+ return np.asarray(self._column_data_lengths, dtype=np.int64)
+
+ def column_data_offsets(self) -> np.ndarray:
+ """Return a numpy int64 array of the column offsets"""
+ return np.asarray(self._column_data_offsets, dtype=np.int64)
+
+ def column_types(self) -> np.ndarray:
+ """
+ Returns a numpy character array of the column types:
+ s (string) or d (double)
+ """
+ return np.asarray(self._column_types, dtype=np.dtype("S1"))
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_properties(self) -> None:
+ # Check magic number
+ self._path_or_buf.seek(0)
+ self._cached_page = self._path_or_buf.read(288)
+ if self._cached_page[0 : len(const.magic)] != const.magic:
+ raise ValueError("magic number mismatch (not a SAS file?)")
+
+ # Get alignment information
+ buf = self._read_bytes(const.align_1_offset, const.align_1_length)
+ if buf == const.u64_byte_checker_value:
+ self.U64 = True
+ self._int_length = 8
+ self._page_bit_offset = const.page_bit_offset_x64
+ self._subheader_pointer_length = const.subheader_pointer_length_x64
+ else:
+ self.U64 = False
+ self._page_bit_offset = const.page_bit_offset_x86
+ self._subheader_pointer_length = const.subheader_pointer_length_x86
+ self._int_length = 4
+ buf = self._read_bytes(const.align_2_offset, const.align_2_length)
+ if buf == const.align_1_checker_value:
+ align1 = const.align_2_value
+ else:
+ align1 = 0
+
+ # Get endianness information
+ buf = self._read_bytes(const.endianness_offset, const.endianness_length)
+ if buf == b"\x01":
+ self.byte_order = "<"
+ self.need_byteswap = sys.byteorder == "big"
+ else:
+ self.byte_order = ">"
+ self.need_byteswap = sys.byteorder == "little"
+
+ # Get encoding information
+ buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
+ if buf in const.encoding_names:
+ self.inferred_encoding = const.encoding_names[buf]
+ if self.encoding == "infer":
+ self.encoding = self.inferred_encoding
+ else:
+ self.inferred_encoding = f"unknown (code={buf})"
+
+ # Timestamp is epoch 01/01/1960
+ epoch = datetime(1960, 1, 1)
+ x = self._read_float(
+ const.date_created_offset + align1, const.date_created_length
+ )
+ self.date_created = epoch + pd.to_timedelta(x, unit="s")
+ x = self._read_float(
+ const.date_modified_offset + align1, const.date_modified_length
+ )
+ self.date_modified = epoch + pd.to_timedelta(x, unit="s")
+
+ self.header_length = self._read_uint(
+ const.header_size_offset + align1, const.header_size_length
+ )
+
+ # Read the rest of the header into cached_page.
+ buf = self._path_or_buf.read(self.header_length - 288)
+ self._cached_page += buf
+ # error: Argument 1 to "len" has incompatible type "Optional[bytes]";
+ # expected "Sized"
+ if len(self._cached_page) != self.header_length: # type: ignore[arg-type]
+ raise ValueError("The SAS7BDAT file appears to be truncated.")
+
+ self._page_length = self._read_uint(
+ const.page_size_offset + align1, const.page_size_length
+ )
+
+ def __next__(self) -> DataFrame:
+ da = self.read(nrows=self.chunksize or 1)
+ if da.empty:
+ self.close()
+ raise StopIteration
+ return da
+
+ # Read a single float of the given width (4 or 8).
+ def _read_float(self, offset: int, width: int):
+ assert self._cached_page is not None
+ if width == 4:
+ return read_float_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_double_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid float width")
+
+ # Read a single unsigned integer of the given width (1, 2, 4 or 8).
+ def _read_uint(self, offset: int, width: int) -> int:
+ assert self._cached_page is not None
+ if width == 1:
+ return self._read_bytes(offset, 1)[0]
+ elif width == 2:
+ return read_uint16_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 4:
+ return read_uint32_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_uint64_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid int width")
+
+ def _read_bytes(self, offset: int, length: int):
+ assert self._cached_page is not None
+ if offset + length > len(self._cached_page):
+ self.close()
+ raise ValueError("The cached page is too small.")
+ return self._cached_page[offset : offset + length]
+
+ def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes:
+ return self._convert_header_text(
+ self._read_bytes(offset, length).rstrip(b"\x00 ")
+ )
+
+ def _parse_metadata(self) -> None:
+ done = False
+ while not done:
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ break
+ if len(self._cached_page) != self._page_length:
+ raise ValueError("Failed to read a meta data page from the SAS file.")
+ done = self._process_page_meta()
+
+ def _process_page_meta(self) -> bool:
+ self._read_page_header()
+ pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type]
+ if self._current_page_type in pt:
+ self._process_page_metadata()
+ is_data_page = self._current_page_type == const.page_data_type
+ is_mix_page = self._current_page_type == const.page_mix_type
+ return bool(
+ is_data_page
+ or is_mix_page
+ or self._current_page_data_subheader_pointers != []
+ )
+
+ def _read_page_header(self) -> None:
+ bit_offset = self._page_bit_offset
+ tx = const.page_type_offset + bit_offset
+ self._current_page_type = (
+ self._read_uint(tx, const.page_type_length) & const.page_type_mask2
+ )
+ tx = const.block_count_offset + bit_offset
+ self._current_page_block_count = self._read_uint(tx, const.block_count_length)
+ tx = const.subheader_count_offset + bit_offset
+ self._current_page_subheaders_count = self._read_uint(
+ tx, const.subheader_count_length
+ )
+
+ def _process_page_metadata(self) -> None:
+ bit_offset = self._page_bit_offset
+
+ for i in range(self._current_page_subheaders_count):
+ offset = const.subheader_pointers_offset + bit_offset
+ total_offset = offset + self._subheader_pointer_length * i
+
+ subheader_offset = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_length = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_compression = self._read_uint(total_offset, 1)
+ total_offset += 1
+
+ subheader_type = self._read_uint(total_offset, 1)
+
+ if (
+ subheader_length == 0
+ or subheader_compression == const.truncated_subheader_id
+ ):
+ continue
+
+ subheader_signature = self._read_bytes(subheader_offset, self._int_length)
+ subheader_index = get_subheader_index(subheader_signature)
+ subheader_processor = self._subheader_processors[subheader_index]
+
+ if subheader_processor is None:
+ f1 = subheader_compression in (const.compressed_subheader_id, 0)
+ f2 = subheader_type == const.compressed_subheader_type
+ if self.compression and f1 and f2:
+ self._current_page_data_subheader_pointers.append(
+ (subheader_offset, subheader_length)
+ )
+ else:
+ self.close()
+ raise ValueError(
+ f"Unknown subheader signature {subheader_signature}"
+ )
+ else:
+ subheader_processor(subheader_offset, subheader_length)
+
+ def _process_rowsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ lcs_offset = offset
+ lcp_offset = offset
+ if self.U64:
+ lcs_offset += 682
+ lcp_offset += 706
+ else:
+ lcs_offset += 354
+ lcp_offset += 378
+
+ self.row_length = self._read_uint(
+ offset + const.row_length_offset_multiplier * int_len,
+ int_len,
+ )
+ self.row_count = self._read_uint(
+ offset + const.row_count_offset_multiplier * int_len,
+ int_len,
+ )
+ self.col_count_p1 = self._read_uint(
+ offset + const.col_count_p1_multiplier * int_len, int_len
+ )
+ self.col_count_p2 = self._read_uint(
+ offset + const.col_count_p2_multiplier * int_len, int_len
+ )
+ mx = const.row_count_on_mix_page_offset_multiplier * int_len
+ self._mix_page_row_count = self._read_uint(offset + mx, int_len)
+ self._lcs = self._read_uint(lcs_offset, 2)
+ self._lcp = self._read_uint(lcp_offset, 2)
+
+ def _process_columnsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ self.column_count = self._read_uint(offset, int_len)
+ if self.col_count_p1 + self.col_count_p2 != self.column_count:
+ print(
+ f"Warning: column count mismatch ({self.col_count_p1} + "
+ f"{self.col_count_p2} != {self.column_count})\n"
+ )
+
+ # Unknown purpose
+ def _process_subheader_counts(self, offset: int, length: int) -> None:
+ pass
+
+ def _process_columntext_subheader(self, offset: int, length: int) -> None:
+ offset += self._int_length
+ text_block_size = self._read_uint(offset, const.text_block_size_length)
+
+ buf = self._read_bytes(offset, text_block_size)
+ cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
+ self.column_names_raw.append(cname_raw)
+
+ if len(self.column_names_raw) == 1:
+ compression_literal = b""
+ for cl in const.compression_literals:
+ if cl in cname_raw:
+ compression_literal = cl
+ self.compression = compression_literal
+ offset -= self._int_length
+
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+
+ buf = self._read_bytes(offset1, self._lcp)
+ compression_literal = buf.rstrip(b"\x00")
+ if compression_literal == b"":
+ self._lcs = 0
+ offset1 = offset + 32
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif compression_literal == const.rle_compression:
+ offset1 = offset + 40
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif self._lcs > 0:
+ self._lcp = 0
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcs)
+ self.creator_proc = buf[0 : self._lcp]
+ if hasattr(self, "creator_proc"):
+ self.creator_proc = self._convert_header_text(self.creator_proc)
+
+ def _process_columnname_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ column_name_pointers_count = (length - 2 * int_len - 12) // 8
+ for i in range(column_name_pointers_count):
+ text_subheader = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_text_subheader_offset
+ )
+ col_name_offset = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_offset_offset
+ )
+ col_name_length = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_length_offset
+ )
+
+ idx = self._read_uint(
+ text_subheader, const.column_name_text_subheader_length
+ )
+ col_offset = self._read_uint(
+ col_name_offset, const.column_name_offset_length
+ )
+ col_len = self._read_uint(col_name_length, const.column_name_length_length)
+
+ name_raw = self.column_names_raw[idx]
+ cname = name_raw[col_offset : col_offset + col_len]
+ self.column_names.append(self._convert_header_text(cname))
+
+ def _process_columnattributes_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)
+ for i in range(column_attributes_vectors_count):
+ col_data_offset = (
+ offset + int_len + const.column_data_offset_offset + i * (int_len + 8)
+ )
+ col_data_len = (
+ offset
+ + 2 * int_len
+ + const.column_data_length_offset
+ + i * (int_len + 8)
+ )
+ col_types = (
+ offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
+ )
+
+ x = self._read_uint(col_data_offset, int_len)
+ self._column_data_offsets.append(x)
+
+ x = self._read_uint(col_data_len, const.column_data_length_length)
+ self._column_data_lengths.append(x)
+
+ x = self._read_uint(col_types, const.column_type_length)
+ self._column_types.append(b"d" if x == 1 else b"s")
+
+ def _process_columnlist_subheader(self, offset: int, length: int) -> None:
+ # unknown purpose
+ pass
+
+ def _process_format_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ text_subheader_format = (
+ offset + const.column_format_text_subheader_index_offset + 3 * int_len
+ )
+ col_format_offset = offset + const.column_format_offset_offset + 3 * int_len
+ col_format_len = offset + const.column_format_length_offset + 3 * int_len
+ text_subheader_label = (
+ offset + const.column_label_text_subheader_index_offset + 3 * int_len
+ )
+ col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
+ col_label_len = offset + const.column_label_length_offset + 3 * int_len
+
+ x = self._read_uint(
+ text_subheader_format, const.column_format_text_subheader_index_length
+ )
+ format_idx = min(x, len(self.column_names_raw) - 1)
+
+ format_start = self._read_uint(
+ col_format_offset, const.column_format_offset_length
+ )
+ format_len = self._read_uint(col_format_len, const.column_format_length_length)
+
+ label_idx = self._read_uint(
+ text_subheader_label, const.column_label_text_subheader_index_length
+ )
+ label_idx = min(label_idx, len(self.column_names_raw) - 1)
+
+ label_start = self._read_uint(
+ col_label_offset, const.column_label_offset_length
+ )
+ label_len = self._read_uint(col_label_len, const.column_label_length_length)
+
+ label_names = self.column_names_raw[label_idx]
+ column_label = self._convert_header_text(
+ label_names[label_start : label_start + label_len]
+ )
+ format_names = self.column_names_raw[format_idx]
+ column_format = self._convert_header_text(
+ format_names[format_start : format_start + format_len]
+ )
+ current_column_number = len(self.columns)
+
+ col = _Column(
+ current_column_number,
+ self.column_names[current_column_number],
+ column_label,
+ column_format,
+ self._column_types[current_column_number],
+ self._column_data_lengths[current_column_number],
+ )
+
+ self.column_formats.append(column_format)
+ self.columns.append(col)
+
+ def read(self, nrows: int | None = None) -> DataFrame:
+ if (nrows is None) and (self.chunksize is not None):
+ nrows = self.chunksize
+ elif nrows is None:
+ nrows = self.row_count
+
+ if len(self._column_types) == 0:
+ self.close()
+ raise EmptyDataError("No columns to parse from file")
+
+ if nrows > 0 and self._current_row_in_file_index >= self.row_count:
+ return DataFrame()
+
+ nrows = min(nrows, self.row_count - self._current_row_in_file_index)
+
+ nd = self._column_types.count(b"d")
+ ns = self._column_types.count(b"s")
+
+ self._string_chunk = np.empty((ns, nrows), dtype=object)
+ self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)
+
+ self._current_row_in_chunk_index = 0
+ p = Parser(self)
+ p.read(nrows)
+
+ rslt = self._chunk_to_dataframe()
+ if self.index is not None:
+ rslt = rslt.set_index(self.index)
+
+ return rslt
+
+ def _read_next_page(self):
+ self._current_page_data_subheader_pointers = []
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ return True
+ elif len(self._cached_page) != self._page_length:
+ self.close()
+ msg = (
+ "failed to read complete page from file (read "
+ f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
+ )
+ raise ValueError(msg)
+
+ self._read_page_header()
+ if self._current_page_type in const.page_meta_types:
+ self._process_page_metadata()
+
+ if self._current_page_type not in const.page_meta_types + [
+ const.page_data_type,
+ const.page_mix_type,
+ ]:
+ return self._read_next_page()
+
+ return False
+
+ def _chunk_to_dataframe(self) -> DataFrame:
+ n = self._current_row_in_chunk_index
+ m = self._current_row_in_file_index
+ ix = range(m - n, m)
+ rslt = {}
+
+ js, jb = 0, 0
+ for j in range(self.column_count):
+ name = self.column_names[j]
+
+ if self._column_types[j] == b"d":
+ col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")
+ rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False)
+ if self.convert_dates:
+ if self.column_formats[j] in const.sas_date_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "d")
+ elif self.column_formats[j] in const.sas_datetime_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "s")
+ jb += 1
+ elif self._column_types[j] == b"s":
+ rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False)
+ if self.convert_text and (self.encoding is not None):
+ rslt[name] = self._decode_string(rslt[name].str)
+ js += 1
+ else:
+ self.close()
+ raise ValueError(f"unknown column type {repr(self._column_types[j])}")
+
+ df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)
+ return df
+
+ def _decode_string(self, b):
+ return b.decode(self.encoding or self.default_encoding)
+
+ def _convert_header_text(self, b: bytes) -> str | bytes:
+ if self.convert_header_text:
+ return self._decode_string(b)
+ else:
+ return b
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
new file mode 100644
index 0000000000000000000000000000000000000000..11b2ed0ee73168ba82e3b8d312f96bcea9398e49
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
@@ -0,0 +1,508 @@
+"""
+Read a SAS XPort format file into a Pandas DataFrame.
+
+Based on code from Jack Cushman (github.com/jcushman/xport).
+
+The file format is defined here:
+
+https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import datetime
+import struct
+from typing import TYPE_CHECKING
+import warnings
+
+import numpy as np
+
+from pandas.util._decorators import Appender
+from pandas.util._exceptions import find_stack_level
+
+import pandas as pd
+
+from pandas.io.common import get_handle
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ DatetimeNaTType,
+ FilePath,
+ ReadBuffer,
+ )
+_correct_line1 = (
+ "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_header1 = (
+ "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
+)
+_correct_header2 = (
+ "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_obs_header = (
+ "HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_fieldkeys = [
+ "ntype",
+ "nhfun",
+ "field_length",
+ "nvar0",
+ "name",
+ "label",
+ "nform",
+ "nfl",
+ "num_decimals",
+ "nfj",
+ "nfill",
+ "niform",
+ "nifl",
+ "nifd",
+ "npos",
+ "_",
+]
+
+
+_base_params_doc = """\
+Parameters
+----------
+filepath_or_buffer : str or file-like object
+ Path to SAS file or object implementing binary read method."""
+
+_params2_doc = """\
+index : identifier of index column
+ Identifier of column that should be used as index of the DataFrame.
+encoding : str
+ Encoding for text data.
+chunksize : int
+ Read file `chunksize` lines at a time, returns iterator."""
+
+_format_params_doc = """\
+format : str
+ File format, only `xport` is currently supported."""
+
+_iterator_doc = """\
+iterator : bool, default False
+ Return XportReader object for reading file incrementally."""
+
+
+_read_sas_doc = f"""Read a SAS file into a DataFrame.
+
+{_base_params_doc}
+{_format_params_doc}
+{_params2_doc}
+{_iterator_doc}
+
+Returns
+-------
+DataFrame or XportReader
+
+Examples
+--------
+Read a SAS Xport file:
+
+>>> df = pd.read_sas('filename.XPT')
+
+Read a Xport file in 10,000 line chunks:
+
+>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
+>>> for chunk in itr:
+>>> do_something(chunk)
+
+"""
+
+_xport_reader_doc = f"""\
+Class for reading SAS Xport files.
+
+{_base_params_doc}
+{_params2_doc}
+
+Attributes
+----------
+member_info : list
+ Contains information about the file
+fields : list
+ Contains information about the variables in the file
+"""
+
+_read_method_doc = """\
+Read observations from SAS Xport file, returning as data frame.
+
+Parameters
+----------
+nrows : int
+ Number of rows to read from data file; if None, read whole
+ file.
+
+Returns
+-------
+A DataFrame.
+"""
+
+
+def _parse_date(datestr: str) -> DatetimeNaTType:
+ """Given a date in xport format, return Python date."""
+ try:
+ # e.g. "16FEB11:10:07:55"
+ return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
+ except ValueError:
+ return pd.NaT
+
+
+def _split_line(s: str, parts):
+ """
+ Parameters
+ ----------
+ s: str
+ Fixed-length string to split
+ parts: list of (name, length) pairs
+ Used to break up string, name '_' will be filtered from output.
+
+ Returns
+ -------
+ Dict of name:contents of string at given location.
+ """
+ out = {}
+ start = 0
+ for name, length in parts:
+ out[name] = s[start : start + length].strip()
+ start += length
+ del out["_"]
+ return out
+
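+# Illustrative call (field widths shortened for the sketch; a trailing "_"
+# entry is required because it is deleted from the result):
+#
+#   _split_line("SAS     mydata  ", [("prefix", 8), ("name", 8), ("_", 0)])
+#   # {'prefix': 'SAS', 'name': 'mydata'}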
+
+def _handle_truncated_float_vec(vec, nbytes):
+ # This feature is not well documented, but some SAS XPORT files
+ # have 2-7 byte "truncated" floats. To read these truncated
+ # floats, pad them with zeros on the right to make 8 byte floats.
+ #
+ # References:
+ # https://github.com/jcushman/xport/pull/3
+ # The R "foreign" library
+
+ if nbytes != 8:
+ vec1 = np.zeros(len(vec), np.dtype("S8"))
+ dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")
+ vec2 = vec1.view(dtype=dtype)
+ vec2["f0"] = vec
+ return vec2
+
+ return vec
+
+
+def _parse_float_vec(vec):
+ """
+ Parse a vector of float values representing IBM 8 byte floats into
+ native 8 byte floats.
+ """
+ dtype = np.dtype(">u4,>u4")
+ vec1 = vec.view(dtype=dtype)
+ xport1 = vec1["f0"]
+ xport2 = vec1["f1"]
+
+ # Start by setting first half of ieee number to first half of IBM
+ # number sans exponent
+ ieee1 = xport1 & 0x00FFFFFF
+
+ # The fraction bit to the left of the binary point in the ieee
+ # format was set and the number was shifted 0, 1, 2, or 3
+ # places. This will tell us how to adjust the ibm exponent to be a
+ # power of 2 ieee exponent and how to shift the fraction bits to
+ # restore the correct magnitude.
+ shift = np.zeros(len(vec), dtype=np.uint8)
+ shift[np.where(xport1 & 0x00200000)] = 1
+ shift[np.where(xport1 & 0x00400000)] = 2
+ shift[np.where(xport1 & 0x00800000)] = 3
+
+ # shift the ieee number down the correct number of places then
+ # set the second half of the ieee number to be the second half
+ # of the ibm number shifted appropriately, ored with the bits
+ # from the first half that would have been shifted in if we
+ # could shift a double. All we are worried about are the low
+ # order 3 bits of the first half since we're only shifting by
+ # 1, 2, or 3.
+ ieee1 >>= shift
+ ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
+
+ # clear the 1 bit to the left of the binary point
+ ieee1 &= 0xFFEFFFFF
+
+ # set the exponent of the ieee number to be the actual exponent
+ # plus the shift count + 1023. Or this into the first half of the
+ # ieee number. The ibm exponent is excess 64 but is adjusted by 65
+ # since during conversion to ibm format the exponent is
+ # incremented by 1 and the fraction bits left 4 positions to the
+ # right of the radix point. (had to add >> 24 because C treats &
+ # 0x7f as 0x7f000000 and Python doesn't)
+ ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (
+ xport1 & 0x80000000
+ )
+
+ ieee = np.empty((len(ieee1),), dtype=">u4,>u4")
+ ieee["f0"] = ieee1
+ ieee["f1"] = ieee2
+ ieee = ieee.view(dtype=">f8")
+ ieee = ieee.astype("f8")
+
+ return ieee
+
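+# Illustrative spot check (not from the pandas test suite): the IBM hex
+# pattern 0x4110000000000000 encodes 1.0, so the conversion above maps it to
+# the IEEE value 1.0.
+#
+#   import numpy as np
+#   vec = np.frombuffer(b"\x41\x10\x00\x00\x00\x00\x00\x00", dtype="S8")
+#   _parse_float_vec(vec)  # array([1.])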
+
+class XportReader(ReaderBase, abc.Iterator):
+ __doc__ = _xport_reader_doc
+
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ index=None,
+ encoding: str | None = "ISO-8859-1",
+ chunksize: int | None = None,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self._encoding = encoding
+ self._lines_read = 0
+ self._index = index
+ self._chunksize = chunksize
+
+ self.handles = get_handle(
+ filepath_or_buffer,
+ "rb",
+ encoding=encoding,
+ is_text=False,
+ compression=compression,
+ )
+ self.filepath_or_buffer = self.handles.handle
+
+ try:
+ self._read_header()
+ except Exception:
+ self.close()
+ raise
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_row(self):
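+        """Read and decode the next 80-byte record ("card image")."""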
+ return self.filepath_or_buffer.read(80).decode()
+
+ def _read_header(self) -> None:
+ self.filepath_or_buffer.seek(0)
+
+ # read file header
+ line1 = self._get_row()
+ if line1 != _correct_line1:
+ if "**COMPRESSED**" in line1:
+ # this was created with the PROC CPORT method and can't be read
+ # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm
+ raise ValueError(
+ "Header record indicates a CPORT file, which is not readable."
+ )
+ raise ValueError("Header record is not an XPORT file.")
+
+ line2 = self._get_row()
+ fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
+ file_info = _split_line(line2, fif)
+ if file_info["prefix"] != "SAS SAS SASLIB":
+ raise ValueError("Header record has invalid prefix.")
+ file_info["created"] = _parse_date(file_info["created"])
+ self.file_info = file_info
+
+ line3 = self._get_row()
+ file_info["modified"] = _parse_date(line3[:16])
+
+ # read member header
+ header1 = self._get_row()
+ header2 = self._get_row()
+ headflag1 = header1.startswith(_correct_header1)
+ headflag2 = header2 == _correct_header2
+ if not (headflag1 and headflag2):
+ raise ValueError("Member header not found")
+ # usually 140, could be 135
+ fieldnamelength = int(header1[-5:-2])
+
+ # member info
+ mem = [
+ ["prefix", 8],
+ ["set_name", 8],
+ ["sasdata", 8],
+ ["version", 8],
+ ["OS", 8],
+ ["_", 24],
+ ["created", 16],
+ ]
+ member_info = _split_line(self._get_row(), mem)
+ mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
+ member_info.update(_split_line(self._get_row(), mem))
+ member_info["modified"] = _parse_date(member_info["modified"])
+ member_info["created"] = _parse_date(member_info["created"])
+ self.member_info = member_info
+
+ # read field names
+ types = {1: "numeric", 2: "char"}
+ fieldcount = int(self._get_row()[54:58])
+ datalength = fieldnamelength * fieldcount
+ # round up to nearest 80
+ if datalength % 80:
+ datalength += 80 - datalength % 80
+ fielddata = self.filepath_or_buffer.read(datalength)
+ fields = []
+ obs_length = 0
+ while len(fielddata) >= fieldnamelength:
+ # pull data for one field
+ fieldbytes, fielddata = (
+ fielddata[:fieldnamelength],
+ fielddata[fieldnamelength:],
+ )
+
+            # trailing bytes are ignored, so if the field descriptor is the
+            # short (135-byte) variant, pad it out to the 140 bytes expected
+            # by the struct pattern below
+ fieldbytes = fieldbytes.ljust(140)
+
+ fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes)
+ field = dict(zip(_fieldkeys, fieldstruct))
+ del field["_"]
+ field["ntype"] = types[field["ntype"]]
+ fl = field["field_length"]
+ if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
+ msg = f"Floating field width {fl} is not between 2 and 8."
+ raise TypeError(msg)
+
+ for k, v in field.items():
+ try:
+ field[k] = v.strip()
+ except AttributeError:
+ pass
+
+ obs_length += field["field_length"]
+ fields += [field]
+
+ header = self._get_row()
+        if header != _correct_obs_header:
+ raise ValueError("Observation header not found.")
+
+ self.fields = fields
+ self.record_length = obs_length
+ self.record_start = self.filepath_or_buffer.tell()
+
+ self.nobs = self._record_count()
+ self.columns = [x["name"].decode() for x in self.fields]
+
+ # Setup the dtype.
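+        # Each observation is one fixed-width record: field i becomes a raw
+        # bytes column named "s{i}" whose width is that field's length.
+        # The numeric/char conversion happens later, in read().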
+ dtypel = [
+ ("s" + str(i), "S" + str(field["field_length"]))
+ for i, field in enumerate(self.fields)
+ ]
+ dtype = np.dtype(dtypel)
+ self._dtype = dtype
+
+ def __next__(self) -> pd.DataFrame:
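+        # default to a single row per iteration when no chunksize was given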
+ return self.read(nrows=self._chunksize or 1)
+
+ def _record_count(self) -> int:
+ """
+ Get number of records in file.
+
+        This may be suboptimal because we have to seek to the end of
+        the file.
+
+        Side effect: restores the file position to record_start.
+ """
+ self.filepath_or_buffer.seek(0, 2)
+ total_records_length = self.filepath_or_buffer.tell() - self.record_start
+
+ if total_records_length % 80 != 0:
+ warnings.warn(
+ "xport file may be corrupted.",
+ stacklevel=find_stack_level(),
+ )
+
+ if self.record_length > 80:
+ self.filepath_or_buffer.seek(self.record_start)
+ return total_records_length // self.record_length
+
+ self.filepath_or_buffer.seek(-80, 2)
+ last_card_bytes = self.filepath_or_buffer.read(80)
+ last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)
+
+ # 8 byte blank
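+        # (2314885530818453536 == 0x2020202020202020, i.e. eight ASCII spaces
+        # b"        " read as a native uint64)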
+ ix = np.flatnonzero(last_card == 2314885530818453536)
+
+ if len(ix) == 0:
+ tail_pad = 0
+ else:
+ tail_pad = 8 * len(ix)
+
+ self.filepath_or_buffer.seek(self.record_start)
+
+ return (total_records_length - tail_pad) // self.record_length
+
+ def get_chunk(self, size: int | None = None) -> pd.DataFrame:
+ """
+        Read lines from the Xport file and return them as a DataFrame.
+
+ Parameters
+ ----------
+        size : int, defaults to None
+            Number of lines to read. If None, falls back to the reader's
+            ``chunksize``; if that is also None, reads the whole file.
+
+ Returns
+ -------
+ DataFrame
+ """
+ if size is None:
+ size = self._chunksize
+ return self.read(nrows=size)
+
+ def _missing_double(self, vec):
+ v = vec.view(dtype="u1,u1,u2,u4")
+ miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
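+        # Special missing values in XPORT put an ASCII code in the first byte:
+        # "." (0x2E), "._" (0x5F), or ".A"-".Z" (0x41-0x5A), with the
+        # remaining seven bytes set to zero.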
+ miss1 = (
+ ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
+ | (v["f0"] == 0x5F)
+ | (v["f0"] == 0x2E)
+ )
+ miss &= miss1
+ return miss
+
+ @Appender(_read_method_doc)
+ def read(self, nrows: int | None = None) -> pd.DataFrame:
+ if nrows is None:
+ nrows = self.nobs
+
+ read_lines = min(nrows, self.nobs - self._lines_read)
+ read_len = read_lines * self.record_length
+ if read_len <= 0:
+ self.close()
+ raise StopIteration
+ raw = self.filepath_or_buffer.read(read_len)
+ data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
+
+ df_data = {}
+ for j, x in enumerate(self.columns):
+ vec = data["s" + str(j)]
+ ntype = self.fields[j]["ntype"]
+ if ntype == "numeric":
+ vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])
+ miss = self._missing_double(vec)
+ v = _parse_float_vec(vec)
+ v[miss] = np.nan
+ elif self.fields[j]["ntype"] == "char":
+ v = [y.rstrip() for y in vec]
+
+ if self._encoding is not None:
+ v = [y.decode(self._encoding) for y in v]
+
+ df_data.update({x: v})
+ df = pd.DataFrame(df_data)
+
+ if self._index is None:
+ df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines))
+ else:
+ df = df.set_index(self._index)
+
+ self._lines_read += read_lines
+
+ return df
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..c39313d5dc6548fcc014f7a886988a2b9d9001ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
@@ -0,0 +1,178 @@
+"""
+Read SAS sas7bdat or xport files.
+"""
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+from typing import (
+ TYPE_CHECKING,
+ overload,
+)
+
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import stringify_path
+
+if TYPE_CHECKING:
+ from collections.abc import Hashable
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ Self,
+ )
+
+ from pandas import DataFrame
+
+
+class ReaderBase(ABC):
+ """
+ Protocol for XportReader and SAS7BDATReader classes.
+ """
+
+ @abstractmethod
+ def read(self, nrows: int | None = None) -> DataFrame:
+ ...
+
+ @abstractmethod
+ def close(self) -> None:
+ ...
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: int = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> ReaderBase:
+ ...
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: None = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> DataFrame | ReaderBase:
+ ...
+
+
+@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = None,
+ index: Hashable | None = None,
+ encoding: str | None = None,
+ chunksize: int | None = None,
+ iterator: bool = False,
+ compression: CompressionOptions = "infer",
+) -> DataFrame | ReaderBase:
+ """
+ Read SAS files stored as either XPORT or SAS7BDAT format files.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.sas7bdat``.
+ format : str {{'xport', 'sas7bdat'}} or None
+ If None, file format is inferred from file extension. If 'xport' or
+ 'sas7bdat', uses the corresponding format.
+ index : identifier of index column, defaults to None
+ Identifier of column that should be used as index of the DataFrame.
+ encoding : str, default is None
+ Encoding for text data. If None, text data are stored as raw bytes.
+ chunksize : int
+        Read file `chunksize` lines at a time, returning an iterator.
+ iterator : bool, defaults to False
+ If True, returns an iterator for reading the file incrementally.
+ {decompression_options}
+
+ Returns
+ -------
+ DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
+ or XportReader
+
+ Examples
+ --------
+ >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
+ """
+ if format is None:
+ buffer_error_msg = (
+ "If this is a buffer object rather "
+ "than a string name, you must specify a format string"
+ )
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ raise ValueError(buffer_error_msg)
+ fname = filepath_or_buffer.lower()
+ if ".xpt" in fname:
+ format = "xport"
+ elif ".sas7bdat" in fname:
+ format = "sas7bdat"
+ else:
+ raise ValueError(
+ f"unable to infer format of SAS file from filename: {repr(fname)}"
+ )
+
+ reader: ReaderBase
+ if format.lower() == "xport":
+ from pandas.io.sas.sas_xport import XportReader
+
+ reader = XportReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ elif format.lower() == "sas7bdat":
+ from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+ reader = SAS7BDATReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ else:
+ raise ValueError("unknown SAS format")
+
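+    # When iterating, hand the open reader back to the caller, who is then
+    # responsible for closing it; otherwise read everything and close.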
+ if iterator or chunksize:
+ return reader
+
+ with reader:
+ return reader.read()
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py
new file mode 100644
index 0000000000000000000000000000000000000000..db31a07df79e6de2862e57fd75de0bd4b9c2455d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.inference import is_list_like
+
+from pandas.io.common import stringify_path
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+ from pathlib import Path
+
+ from pandas._typing import DtypeBackend
+
+ from pandas import DataFrame
+
+
+def read_spss(
+ path: str | Path,
+ usecols: Sequence[str] | None = None,
+ convert_categoricals: bool = True,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame:
+ """
+ Load an SPSS file from the file path, returning a DataFrame.
+
+ Parameters
+ ----------
+ path : str or Path
+ File path.
+ usecols : list-like, optional
+ Return a subset of the columns. If None, return all columns.
+ convert_categoricals : bool, default is True
+ Convert categorical columns into pd.Categorical.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
+ """
+ pyreadstat = import_optional_dependency("pyreadstat")
+ check_dtype_backend(dtype_backend)
+
+ if usecols is not None:
+ if not is_list_like(usecols):
+ raise TypeError("usecols must be list-like.")
+ usecols = list(usecols) # pyreadstat requires a list
+
+ df, metadata = pyreadstat.read_sav(
+ stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals
+ )
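+    # attach the pyreadstat file metadata (column labels, value labels, etc.)
+    # to the result via DataFrame.attrs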
+ df.attrs = metadata.__dict__
+ if dtype_backend is not lib.no_default:
+ df = df.convert_dtypes(dtype_backend=dtype_backend)
+ return df
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e17175167f25a4bfc7eb559070927f56dc84eae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py
@@ -0,0 +1,2926 @@
+"""
+Collection of query wrappers / abstractions to both facilitate data
+retrieval and to reduce dependency on DB-specific API.
+"""
+
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+from contextlib import (
+ ExitStack,
+ contextmanager,
+)
+from datetime import (
+ date,
+ datetime,
+ time,
+)
+from functools import partial
+import re
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Literal,
+ cast,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ AbstractMethodError,
+ DatabaseError,
+)
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ is_dict_like,
+ is_list_like,
+)
+from pandas.core.dtypes.dtypes import (
+ ArrowDtype,
+ DatetimeTZDtype,
+)
+from pandas.core.dtypes.missing import isna
+
+from pandas import get_option
+from pandas.core.api import (
+ DataFrame,
+ Series,
+)
+from pandas.core.arrays import ArrowExtensionArray
+from pandas.core.base import PandasObject
+import pandas.core.common as com
+from pandas.core.common import maybe_make_list
+from pandas.core.internals.construction import convert_object_array
+from pandas.core.tools.datetimes import to_datetime
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Iterator,
+ Mapping,
+ )
+
+ from sqlalchemy import Table
+ from sqlalchemy.sql.expression import (
+ Select,
+ TextClause,
+ )
+
+ from pandas._typing import (
+ DateTimeErrorChoices,
+ DtypeArg,
+ DtypeBackend,
+ IndexLabel,
+ Self,
+ )
+
+ from pandas import Index
+
+# -----------------------------------------------------------------------------
+# -- Helper functions
+
+
+def _process_parse_dates_argument(parse_dates):
+ """Process parse_dates argument for read_sql functions"""
+ # handle non-list entries for parse_dates gracefully
+ if parse_dates is True or parse_dates is None or parse_dates is False:
+ parse_dates = []
+
+ elif not hasattr(parse_dates, "__iter__"):
+ parse_dates = [parse_dates]
+ return parse_dates
+
+
+def _handle_date_column(
+ col, utc: bool = False, format: str | dict[str, Any] | None = None
+):
+ if isinstance(format, dict):
+ # GH35185 Allow custom error values in parse_dates argument of
+ # read_sql like functions.
+ # Format can take on custom to_datetime argument values such as
+ # {"errors": "coerce"} or {"dayfirst": True}
+ error: DateTimeErrorChoices = format.pop("errors", None) or "ignore"
+ if error == "ignore":
+ try:
+ return to_datetime(col, **format)
+ except (TypeError, ValueError):
+ # TODO: not reached 2023-10-27; needed?
+ return col
+ return to_datetime(col, errors=error, **format)
+ else:
+ # Allow passing of formatting string for integers
+ # GH17855
+ if format is None and (
+ issubclass(col.dtype.type, np.floating)
+ or issubclass(col.dtype.type, np.integer)
+ ):
+ format = "s"
+ if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
+ return to_datetime(col, errors="coerce", unit=format, utc=utc)
+ elif isinstance(col.dtype, DatetimeTZDtype):
+ # coerce to UTC timezone
+ # GH11216
+ return to_datetime(col, utc=True)
+ else:
+ return to_datetime(col, errors="coerce", format=format, utc=utc)
+
+
+def _parse_date_columns(data_frame, parse_dates):
+ """
+    Force columns designated in ``parse_dates`` (and tz-aware datetime
+    columns) to be parsed as datetime.
+    Supports both string formatted and integer timestamp columns.
+ """
+ parse_dates = _process_parse_dates_argument(parse_dates)
+
+ # we want to coerce datetime64_tz dtypes for now to UTC
+ # we could in theory do a 'nice' conversion from a FixedOffset tz
+ # GH11216
+ for i, (col_name, df_col) in enumerate(data_frame.items()):
+ if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates:
+ try:
+ fmt = parse_dates[col_name]
+ except (KeyError, TypeError):
+ fmt = None
+ data_frame.isetitem(i, _handle_date_column(df_col, format=fmt))
+
+ return data_frame
+
+
+def _convert_arrays_to_dataframe(
+ data,
+ columns,
+ coerce_float: bool = True,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+) -> DataFrame:
+ content = lib.to_object_array_tuples(data)
+ arrays = convert_object_array(
+ list(content.T),
+ dtype=None,
+ coerce_float=coerce_float,
+ dtype_backend=dtype_backend,
+ )
+ if dtype_backend == "pyarrow":
+ pa = import_optional_dependency("pyarrow")
+
+ result_arrays = []
+ for arr in arrays:
+ pa_array = pa.array(arr, from_pandas=True)
+ if arr.dtype == "string":
+ # TODO: Arrow still infers strings arrays as regular strings instead
+            # of large_string, which is what we preserve everywhere else for
+ # dtype_backend="pyarrow". We may want to reconsider this
+ pa_array = pa_array.cast(pa.string())
+ result_arrays.append(ArrowExtensionArray(pa_array))
+ arrays = result_arrays # type: ignore[assignment]
+ if arrays:
+ df = DataFrame(dict(zip(list(range(len(columns))), arrays)))
+ df.columns = columns
+ return df
+ else:
+ return DataFrame(columns=columns)
+
+
+def _wrap_result(
+ data,
+ columns,
+ index_col=None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+):
+ """Wrap result set of a SQLAlchemy query in a DataFrame."""
+ frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend)
+
+ if dtype:
+ frame = frame.astype(dtype)
+
+ frame = _parse_date_columns(frame, parse_dates)
+
+ if index_col is not None:
+ frame = frame.set_index(index_col)
+
+ return frame
+
+
+def _wrap_result_adbc(
+ df: DataFrame,
+ *,
+ index_col=None,
+ parse_dates=None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+) -> DataFrame:
+ """Wrap result set of a SQLAlchemy query in a DataFrame."""
+ if dtype:
+ df = df.astype(dtype)
+
+ df = _parse_date_columns(df, parse_dates)
+
+ if index_col is not None:
+ df = df.set_index(index_col)
+
+ return df
+
+
+def execute(sql, con, params=None):
+ """
+ Execute the given SQL query using the provided connection object.
+
+ Parameters
+ ----------
+ sql : string
+ SQL query to be executed.
+ con : SQLAlchemy connection or sqlite3 connection
+ If a DBAPI2 object, only sqlite3 is supported.
+ params : list or tuple, optional, default: None
+ List of parameters to pass to execute method.
+
+ Returns
+ -------
+ Results Iterable
+ """
+ warnings.warn(
+ "`pandas.io.sql.execute` is deprecated and "
+ "will be removed in the future version.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ ) # GH50185
+ sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
+
+ if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)):
+ raise TypeError("pandas.io.sql.execute requires a connection") # GH50185
+ with pandasSQL_builder(con, need_transaction=True) as pandas_sql:
+ return pandas_sql.execute(sql, params)
+
+
+# -----------------------------------------------------------------------------
+# -- Read and write to DataFrames
+
+
+@overload
+def read_sql_table(
+ table_name: str,
+ con,
+ schema=...,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ parse_dates: list[str] | dict[str, str] | None = ...,
+ columns: list[str] | None = ...,
+ chunksize: None = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+@overload
+def read_sql_table(
+ table_name: str,
+ con,
+ schema=...,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ parse_dates: list[str] | dict[str, str] | None = ...,
+ columns: list[str] | None = ...,
+ chunksize: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> Iterator[DataFrame]:
+ ...
+
+
+def read_sql_table(
+ table_name: str,
+ con,
+ schema: str | None = None,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates: list[str] | dict[str, str] | None = None,
+ columns: list[str] | None = None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL database table into a DataFrame.
+
+ Given a table name and a SQLAlchemy connectable, returns a DataFrame.
+ This function does not support DBAPI connections.
+
+ Parameters
+ ----------
+ table_name : str
+ Name of SQL table in database.
+ con : SQLAlchemy connectable or str
+ A database URI could be provided as str.
+ SQLite DBAPI connection mode not supported.
+ schema : str, default None
+ Name of SQL schema in database to query (if database flavor
+ supports this). Uses default schema if None (default).
+ index_col : str or list of str, optional, default: None
+ Column(s) to set as index(MultiIndex).
+ coerce_float : bool, default True
+ Attempts to convert values of non-string, non-numeric objects (like
+        decimal.Decimal) to floating point. Can result in loss of precision.
+ parse_dates : list or dict, default None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`
+ Especially useful with databases without native Datetime support,
+ such as SQLite.
+ columns : list, default None
+ List of column names to select from SQL table.
+ chunksize : int, default None
+ If specified, returns an iterator where `chunksize` is the number of
+ rows to include in each chunk.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame or Iterator[DataFrame]
+        A SQL table is returned as a two-dimensional data structure with
+        labeled axes.
+
+ See Also
+ --------
+ read_sql_query : Read SQL query into a DataFrame.
+ read_sql : Read SQL query or database table into a DataFrame.
+
+ Notes
+ -----
+ Any datetime values with time zone information will be converted to UTC.
+
+ Examples
+ --------
+ >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
+ """
+
+ check_dtype_backend(dtype_backend)
+ if dtype_backend is lib.no_default:
+ dtype_backend = "numpy" # type: ignore[assignment]
+ assert dtype_backend is not lib.no_default
+
+ with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
+ if not pandas_sql.has_table(table_name):
+ raise ValueError(f"Table {table_name} not found")
+
+ table = pandas_sql.read_table(
+ table_name,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ columns=columns,
+ chunksize=chunksize,
+ dtype_backend=dtype_backend,
+ )
+
+ if table is not None:
+ return table
+ else:
+ raise ValueError(f"Table {table_name} not found", con)
+
+
+@overload
+def read_sql_query(
+ sql,
+ con,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ params: list[Any] | Mapping[str, Any] | None = ...,
+ parse_dates: list[str] | dict[str, str] | None = ...,
+ chunksize: None = ...,
+ dtype: DtypeArg | None = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+@overload
+def read_sql_query(
+ sql,
+ con,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ params: list[Any] | Mapping[str, Any] | None = ...,
+ parse_dates: list[str] | dict[str, str] | None = ...,
+ chunksize: int = ...,
+ dtype: DtypeArg | None = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> Iterator[DataFrame]:
+ ...
+
+
+def read_sql_query(
+ sql,
+ con,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ params: list[Any] | Mapping[str, Any] | None = None,
+ parse_dates: list[str] | dict[str, str] | None = None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL query into a DataFrame.
+
+ Returns a DataFrame corresponding to the result set of the query
+ string. Optionally provide an `index_col` parameter to use one of the
+    columns as the index; otherwise a default integer index will be used.
+
+ Parameters
+ ----------
+ sql : str SQL query or SQLAlchemy Selectable (select or text object)
+ SQL query to be executed.
+ con : SQLAlchemy connectable, str, or sqlite3 connection
+ Using SQLAlchemy makes it possible to use any DB supported by that
+ library. If a DBAPI2 object, only sqlite3 is supported.
+ index_col : str or list of str, optional, default: None
+ Column(s) to set as index(MultiIndex).
+ coerce_float : bool, default True
+ Attempts to convert values of non-string, non-numeric objects (like
+ decimal.Decimal) to floating point. Useful for SQL result sets.
+ params : list, tuple or mapping, optional, default: None
+ List of parameters to pass to execute method. The syntax used
+ to pass parameters is database driver dependent. Check your
+ database driver documentation for which of the five syntax styles,
+ described in PEP 249's paramstyle, is supported.
+        E.g. for psycopg2, which uses %(name)s, pass params={'name': 'value'}.
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`
+ Especially useful with databases without native Datetime support,
+ such as SQLite.
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number of
+ rows to include in each chunk.
+ dtype : Type name or dict of columns
+ Data type for data or columns. E.g. np.float64 or
+ {'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
+
+ .. versionadded:: 1.3.0
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame or Iterator[DataFrame]
+
+ See Also
+ --------
+ read_sql_table : Read SQL database table into a DataFrame.
+ read_sql : Read SQL query or database table into a DataFrame.
+
+ Notes
+ -----
+ Any datetime values with time zone information parsed via the `parse_dates`
+ parameter will be converted to UTC.
+
+ Examples
+ --------
+ >>> from sqlalchemy import create_engine # doctest: +SKIP
+ >>> engine = create_engine("sqlite:///database.db") # doctest: +SKIP
+ >>> with engine.connect() as conn, conn.begin(): # doctest: +SKIP
+    ... data = pd.read_sql_query("SELECT * FROM data", conn) # doctest: +SKIP
+ """
+
+ check_dtype_backend(dtype_backend)
+ if dtype_backend is lib.no_default:
+ dtype_backend = "numpy" # type: ignore[assignment]
+ assert dtype_backend is not lib.no_default
+
+ with pandasSQL_builder(con) as pandas_sql:
+ return pandas_sql.read_query(
+ sql,
+ index_col=index_col,
+ params=params,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ chunksize=chunksize,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+
+
+@overload
+def read_sql(
+ sql,
+ con,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ columns: list[str] = ...,
+ chunksize: None = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ dtype: DtypeArg | None = None,
+) -> DataFrame:
+ ...
+
+
+@overload
+def read_sql(
+ sql,
+ con,
+ index_col: str | list[str] | None = ...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ columns: list[str] = ...,
+ chunksize: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ dtype: DtypeArg | None = None,
+) -> Iterator[DataFrame]:
+ ...
+
+
+def read_sql(
+ sql,
+ con,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ params=None,
+ parse_dates=None,
+ columns: list[str] | None = None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ dtype: DtypeArg | None = None,
+) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL query or database table into a DataFrame.
+
+ This function is a convenience wrapper around ``read_sql_table`` and
+ ``read_sql_query`` (for backward compatibility). It will delegate
+ to the specific function depending on the provided input. A SQL query
+ will be routed to ``read_sql_query``, while a database table name will
+ be routed to ``read_sql_table``. Note that the delegated function might
+    have more specific notes about its functionality not listed here.
+
+ Parameters
+ ----------
+ sql : str or SQLAlchemy Selectable (select or text object)
+ SQL query to be executed or a table name.
+ con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection
+ ADBC provides high performance I/O with native type support, where available.
+ Using SQLAlchemy makes it possible to use any DB supported by that
+ library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
+ for engine disposal and connection closure for the ADBC connection and
+ SQLAlchemy connectable; str connections are closed automatically. See
+ `here `_.
+ index_col : str or list of str, optional, default: None
+ Column(s) to set as index(MultiIndex).
+ coerce_float : bool, default True
+ Attempts to convert values of non-string, non-numeric objects (like
+ decimal.Decimal) to floating point, useful for SQL result sets.
+ params : list, tuple or dict, optional, default: None
+ List of parameters to pass to execute method. The syntax used
+ to pass parameters is database driver dependent. Check your
+ database driver documentation for which of the five syntax styles,
+ described in PEP 249's paramstyle, is supported.
+        E.g. for psycopg2, which uses %(name)s, pass params={'name': 'value'}.
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`
+ Especially useful with databases without native Datetime support,
+ such as SQLite.
+ columns : list, default: None
+ List of column names to select from SQL table (only used when reading
+ a table).
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the
+ number of rows to include in each chunk.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+ dtype : Type name or dict of columns
+ Data type for data or columns. E.g. np.float64 or
+ {'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
+ The argument is ignored if a table is passed instead of a query.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ DataFrame or Iterator[DataFrame]
+
+ See Also
+ --------
+ read_sql_table : Read SQL database table into a DataFrame.
+ read_sql_query : Read SQL query into a DataFrame.
+
+ Examples
+ --------
+ Read data from SQL via either a SQL query or a SQL tablename.
+    When using a SQLite database only SQL queries are accepted;
+ providing only the SQL tablename will result in an error.
+
+ >>> from sqlite3 import connect
+ >>> conn = connect(':memory:')
+ >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
+ ... columns=['int_column', 'date_column'])
+ >>> df.to_sql(name='test_data', con=conn)
+ 2
+
+ >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
+ int_column date_column
+ 0 0 10/11/12
+ 1 1 12/11/10
+
+ >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
+
+    Apply date parsing to columns through the ``parse_dates`` argument.
+ The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
+ Custom argument values for applying ``pd.to_datetime`` on a column are specified
+ via a dictionary format:
+
+ >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
+ ... conn,
+ ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
+ int_column date_column
+ 0 0 2012-11-10
+ 1 1 2010-11-12
+
+ .. versionadded:: 2.2.0
+
+ pandas now supports reading via ADBC drivers
+
+ >>> from adbc_driver_postgresql import dbapi # doctest:+SKIP
+ >>> with dbapi.connect('postgres:///db_name') as conn: # doctest:+SKIP
+ ... pd.read_sql('SELECT int_column FROM test_data', conn)
+ int_column
+ 0 0
+ 1 1
+ """
+
+ check_dtype_backend(dtype_backend)
+ if dtype_backend is lib.no_default:
+ dtype_backend = "numpy" # type: ignore[assignment]
+ assert dtype_backend is not lib.no_default
+
+ with pandasSQL_builder(con) as pandas_sql:
+ if isinstance(pandas_sql, SQLiteDatabase):
+ return pandas_sql.read_query(
+ sql,
+ index_col=index_col,
+ params=params,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ chunksize=chunksize,
+ dtype_backend=dtype_backend,
+ dtype=dtype,
+ )
+
+ try:
+ _is_table_name = pandas_sql.has_table(sql)
+ except Exception:
+ # using generic exception to catch errors from sql drivers (GH24988)
+ _is_table_name = False
+
+ if _is_table_name:
+ return pandas_sql.read_table(
+ sql,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ columns=columns,
+ chunksize=chunksize,
+ dtype_backend=dtype_backend,
+ )
+ else:
+ return pandas_sql.read_query(
+ sql,
+ index_col=index_col,
+ params=params,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ chunksize=chunksize,
+ dtype_backend=dtype_backend,
+ dtype=dtype,
+ )
+
+
+def to_sql(
+ frame,
+ name: str,
+ con,
+ schema: str | None = None,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ index: bool = True,
+ index_label: IndexLabel | None = None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ engine: str = "auto",
+ **engine_kwargs,
+) -> int | None:
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ frame : DataFrame, Series
+ name : str
+ Name of SQL table.
+    con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 DBAPI2
+        connection
+ ADBC provides high performance I/O with native type support, where available.
+ Using SQLAlchemy makes it possible to use any DB supported by that
+ library.
+ If a DBAPI2 object, only sqlite3 is supported.
+ schema : str, optional
+ Name of SQL schema in database to write to (if database flavor
+ supports this). If None, use default schema (default).
+ if_exists : {'fail', 'replace', 'append'}, default 'fail'
+    - fail: If table exists, raise a ValueError.
+ - replace: If table exists, drop it, recreate it, and insert data.
+ - append: If table exists, insert data. Create if does not exist.
+ index : bool, default True
+ Write DataFrame index as a column.
+ index_label : str or sequence, optional
+ Column label for index column(s). If None is given (default) and
+ `index` is True, then the index names are used.
+ A sequence should be given if the DataFrame uses MultiIndex.
+ chunksize : int, optional
+ Specify the number of rows in each batch to be written at a time.
+ By default, all rows will be written at once.
+ dtype : dict or scalar, optional
+ Specifying the datatype for columns. If a dictionary is used, the
+ keys should be the column names and the values should be the
+ SQLAlchemy types or strings for the sqlite3 fallback mode. If a
+ scalar is provided, it will be applied to all columns.
+ method : {None, 'multi', callable}, optional
+ Controls the SQL insertion clause used:
+
+ - None : Uses standard SQL ``INSERT`` clause (one per row).
+ - ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
+ - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method `.
+ engine : {'auto', 'sqlalchemy'}, default 'auto'
+ SQL engine library to use. If 'auto', then the option
+ ``io.sql.engine`` is used. The default ``io.sql.engine``
+        behavior is 'sqlalchemy'.
+
+ .. versionadded:: 1.3.0
+
+ **engine_kwargs
+ Any additional kwargs are passed to the engine.
+
+ Returns
+ -------
+ None or int
+ Number of rows affected by to_sql. None is returned if the callable
+ passed into ``method`` does not return an integer number of rows.
+
+ .. versionadded:: 1.4.0
+
+ Notes
+ -----
+ The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``
+    or SQLAlchemy connectable. If using ADBC, the returned rows are the result
+    of ``Cursor.adbc_ingest``. The returned value may not reflect the exact number of written
+    rows as stipulated in the
+    `sqlite3 `__ or
+    `SQLAlchemy `__ documentation.
+ """ # noqa: E501
+ if if_exists not in ("fail", "replace", "append"):
+ raise ValueError(f"'{if_exists}' is not valid for if_exists")
+
+ if isinstance(frame, Series):
+ frame = frame.to_frame()
+ elif not isinstance(frame, DataFrame):
+ raise NotImplementedError(
+ "'frame' argument should be either a Series or a DataFrame"
+ )
+
+ with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
+ return pandas_sql.to_sql(
+ frame,
+ name,
+ if_exists=if_exists,
+ index=index,
+ index_label=index_label,
+ schema=schema,
+ chunksize=chunksize,
+ dtype=dtype,
+ method=method,
+ engine=engine,
+ **engine_kwargs,
+ )
+
+
+def has_table(table_name: str, con, schema: str | None = None) -> bool:
+ """
+ Check if DataBase has named table.
+
+ Parameters
+ ----------
+    table_name : str
+        Name of SQL table.
+    con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection
+        ADBC provides high performance I/O with native type support, where available.
+        Using SQLAlchemy makes it possible to use any DB supported by that
+        library.
+        If a DBAPI2 object, only sqlite3 is supported.
+    schema : str, default None
+        Name of SQL schema in database to write to (if database flavor supports
+        this). If None, use default schema (default).
+
+    Returns
+    -------
+    bool
+ """
+ with pandasSQL_builder(con, schema=schema) as pandas_sql:
+ return pandas_sql.has_table(table_name)
+
+
+table_exists = has_table
+
+
+def pandasSQL_builder(
+ con,
+ schema: str | None = None,
+ need_transaction: bool = False,
+) -> PandasSQL:
+ """
+ Convenience function to return the correct PandasSQL subclass based on the
+ provided parameters. Also creates a sqlalchemy connection and transaction
+ if necessary.
+ """
+ import sqlite3
+
+ if isinstance(con, sqlite3.Connection) or con is None:
+ return SQLiteDatabase(con)
+
+ sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
+
+ if isinstance(con, str) and sqlalchemy is None:
+ raise ImportError("Using URI string without sqlalchemy installed.")
+
+ if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)):
+ return SQLDatabase(con, schema, need_transaction)
+
+ adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore")
+ if adbc and isinstance(con, adbc.Connection):
+ return ADBCDatabase(con)
+
+ warnings.warn(
+ "pandas only supports SQLAlchemy connectable (engine/connection) or "
+ "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 "
+ "objects are not tested. Please consider using SQLAlchemy.",
+ UserWarning,
+ stacklevel=find_stack_level(),
+ )
+ return SQLiteDatabase(con)
+
+
+class SQLTable(PandasObject):
+ """
+    For mapping pandas tables to SQL tables.
+    Uses the fact that the table is reflected by SQLAlchemy to
+    do better type conversions.
+ Also holds various flags needed to avoid having to
+ pass them between functions all the time.
+ """
+
+ # TODO: support for multiIndex
+
+ def __init__(
+ self,
+ name: str,
+ pandas_sql_engine,
+ frame=None,
+ index: bool | str | list[str] | None = True,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ prefix: str = "pandas",
+ index_label=None,
+ schema=None,
+ keys=None,
+ dtype: DtypeArg | None = None,
+ ) -> None:
+ self.name = name
+ self.pd_sql = pandas_sql_engine
+ self.prefix = prefix
+ self.frame = frame
+ self.index = self._index_name(index, index_label)
+ self.schema = schema
+ self.if_exists = if_exists
+ self.keys = keys
+ self.dtype = dtype
+
+ if frame is not None:
+ # We want to initialize based on a dataframe
+ self.table = self._create_table_setup()
+ else:
+ # no data provided, read-only mode
+ self.table = self.pd_sql.get_table(self.name, self.schema)
+
+ if self.table is None:
+ raise ValueError(f"Could not init table '{name}'")
+
+ if not len(self.name):
+ raise ValueError("Empty table name specified")
+
+ def exists(self):
+ return self.pd_sql.has_table(self.name, self.schema)
+
+ def sql_schema(self) -> str:
+ from sqlalchemy.schema import CreateTable
+
+ return str(CreateTable(self.table).compile(self.pd_sql.con))
+
+ def _execute_create(self) -> None:
+ # Inserting table into database, add to MetaData object
+ self.table = self.table.to_metadata(self.pd_sql.meta)
+ with self.pd_sql.run_transaction():
+ self.table.create(bind=self.pd_sql.con)
+
+ def create(self) -> None:
+ if self.exists():
+ if self.if_exists == "fail":
+ raise ValueError(f"Table '{self.name}' already exists.")
+ if self.if_exists == "replace":
+ self.pd_sql.drop_table(self.name, self.schema)
+ self._execute_create()
+ elif self.if_exists == "append":
+ pass
+ else:
+ raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
+ else:
+ self._execute_create()
+
+ def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
+ """
+ Execute SQL statement inserting data
+
+ Parameters
+ ----------
+ conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
+ keys : list of str
+ Column names
+ data_iter : generator of list
+ Each item contains a list of values to be inserted
+ """
+ data = [dict(zip(keys, row)) for row in data_iter]
+ result = conn.execute(self.table.insert(), data)
+ return result.rowcount
+
+ def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
+ """
+        Alternative to _execute_insert for DBs that support multi-value INSERT.
+
+        Note: multi-value insert is usually faster for analytics DBs
+        and tables containing a few columns,
+        but performance degrades quickly as the number of columns increases.
+
+ """
+
+ from sqlalchemy import insert
+
+ data = [dict(zip(keys, row)) for row in data_iter]
+ stmt = insert(self.table).values(data)
+ result = conn.execute(stmt)
+ return result.rowcount
+
+ def insert_data(self) -> tuple[list[str], list[np.ndarray]]:
+ if self.index is not None:
+ temp = self.frame.copy()
+ temp.index.names = self.index
+ try:
+ temp.reset_index(inplace=True)
+ except ValueError as err:
+ raise ValueError(f"duplicate name in index/columns: {err}") from err
+ else:
+ temp = self.frame
+
+ column_names = list(map(str, temp.columns))
+ ncols = len(column_names)
+ # this just pre-allocates the list: None's will be replaced with ndarrays
+ # error: List item 0 has incompatible type "None"; expected "ndarray"
+ data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item]
+
+ for i, (_, ser) in enumerate(temp.items()):
+ if ser.dtype.kind == "M":
+ if isinstance(ser._values, ArrowExtensionArray):
+ import pyarrow as pa
+
+ if pa.types.is_date(ser.dtype.pyarrow_dtype):
+ # GH#53854 to_pydatetime not supported for pyarrow date dtypes
+ d = ser._values.to_numpy(dtype=object)
+ else:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=FutureWarning)
+ # GH#52459 to_pydatetime will return Index[object]
+ d = np.asarray(ser.dt.to_pydatetime(), dtype=object)
+ else:
+ d = ser._values.to_pydatetime()
+ elif ser.dtype.kind == "m":
+ vals = ser._values
+ if isinstance(vals, ArrowExtensionArray):
+ vals = vals.to_numpy(dtype=np.dtype("m8[ns]"))
+ # store as integers, see GH#6921, GH#7076
+ d = vals.view("i8").astype(object)
+ else:
+ d = ser._values.astype(object)
+
+ assert isinstance(d, np.ndarray), type(d)
+
+ if ser._can_hold_na:
+ # Note: this will miss timedeltas since they are converted to int
+ mask = isna(d)
+ d[mask] = None
+
+ data_list[i] = d
+
+ return column_names, data_list
+
+ def insert(
+ self,
+ chunksize: int | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ ) -> int | None:
+ # set insert method
+ if method is None:
+ exec_insert = self._execute_insert
+ elif method == "multi":
+ exec_insert = self._execute_insert_multi
+ elif callable(method):
+ exec_insert = partial(method, self)
+ else:
+ raise ValueError(f"Invalid parameter `method`: {method}")
+
+ keys, data_list = self.insert_data()
+
+ nrows = len(self.frame)
+
+ if nrows == 0:
+ return 0
+
+ if chunksize is None:
+ chunksize = nrows
+ elif chunksize == 0:
+ raise ValueError("chunksize argument should be non-zero")
+
+ chunks = (nrows // chunksize) + 1
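+        # the +1 covers a final partial chunk; when nrows divides evenly the
+        # extra iteration is skipped by the start_i >= end_i check below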
+ total_inserted = None
+ with self.pd_sql.run_transaction() as conn:
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, nrows)
+ if start_i >= end_i:
+ break
+
+ chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
+ num_inserted = exec_insert(conn, keys, chunk_iter)
+ # GH 46891
+ if num_inserted is not None:
+ if total_inserted is None:
+ total_inserted = num_inserted
+ else:
+ total_inserted += num_inserted
+ return total_inserted
+
+ def _query_iterator(
+ self,
+ result,
+ exit_stack: ExitStack,
+ chunksize: int | None,
+ columns,
+ coerce_float: bool = True,
+ parse_dates=None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ):
+ """Return generator through chunked result set."""
+ has_read_data = False
+ with exit_stack:
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ if not has_read_data:
+ yield DataFrame.from_records(
+ [], columns=columns, coerce_float=coerce_float
+ )
+ break
+
+ has_read_data = True
+ self.frame = _convert_arrays_to_dataframe(
+ data, columns, coerce_float, dtype_backend
+ )
+
+ self._harmonize_columns(
+ parse_dates=parse_dates, dtype_backend=dtype_backend
+ )
+
+ if self.index is not None:
+ self.frame.set_index(self.index, inplace=True)
+
+ yield self.frame
+
+ def read(
+ self,
+ exit_stack: ExitStack,
+ coerce_float: bool = True,
+ parse_dates=None,
+ columns=None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ from sqlalchemy import select
+
+ if columns is not None and len(columns) > 0:
+ cols = [self.table.c[n] for n in columns]
+ if self.index is not None:
+ for idx in self.index[::-1]:
+ cols.insert(0, self.table.c[idx])
+ sql_select = select(*cols)
+ else:
+ sql_select = select(self.table)
+ result = self.pd_sql.execute(sql_select)
+ column_names = result.keys()
+
+ if chunksize is not None:
+ return self._query_iterator(
+ result,
+ exit_stack,
+ chunksize,
+ column_names,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype_backend=dtype_backend,
+ )
+ else:
+ data = result.fetchall()
+ self.frame = _convert_arrays_to_dataframe(
+ data, column_names, coerce_float, dtype_backend
+ )
+
+ self._harmonize_columns(
+ parse_dates=parse_dates, dtype_backend=dtype_backend
+ )
+
+ if self.index is not None:
+ self.frame.set_index(self.index, inplace=True)
+
+ return self.frame
+
+ def _index_name(self, index, index_label):
+ # for writing: index=True to include index in sql table
+ if index is True:
+ nlevels = self.frame.index.nlevels
+ # if index_label is specified, set this as index name(s)
+ if index_label is not None:
+ if not isinstance(index_label, list):
+ index_label = [index_label]
+ if len(index_label) != nlevels:
+ raise ValueError(
+ "Length of 'index_label' should match number of "
+ f"levels, which is {nlevels}"
+ )
+ return index_label
+ # return the used column labels for the index columns
+ if (
+ nlevels == 1
+ and "index" not in self.frame.columns
+ and self.frame.index.name is None
+ ):
+ return ["index"]
+ else:
+ return com.fill_missing_names(self.frame.index.names)
+
+ # for reading: index=(list of) string to specify column to set as index
+ elif isinstance(index, str):
+ return [index]
+ elif isinstance(index, list):
+ return index
+ else:
+ return None
+
+ def _get_column_names_and_types(self, dtype_mapper):
+ column_names_and_types = []
+ if self.index is not None:
+ for i, idx_label in enumerate(self.index):
+ idx_type = dtype_mapper(self.frame.index._get_level_values(i))
+ column_names_and_types.append((str(idx_label), idx_type, True))
+
+ column_names_and_types += [
+ (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
+ for i in range(len(self.frame.columns))
+ ]
+
+ return column_names_and_types
+
+ def _create_table_setup(self):
+ from sqlalchemy import (
+ Column,
+ PrimaryKeyConstraint,
+ Table,
+ )
+ from sqlalchemy.schema import MetaData
+
+ column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
+
+ columns: list[Any] = [
+ Column(name, typ, index=is_index)
+ for name, typ, is_index in column_names_and_types
+ ]
+
+ if self.keys is not None:
+ if not is_list_like(self.keys):
+ keys = [self.keys]
+ else:
+ keys = self.keys
+ pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
+ columns.append(pkc)
+
+ schema = self.schema or self.pd_sql.meta.schema
+
+ # At this point, attach to new metadata, only attach to self.meta
+ # once table is created.
+ meta = MetaData()
+ return Table(self.name, meta, *columns, schema=schema)
+
+ def _harmonize_columns(
+ self,
+ parse_dates=None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> None:
+ """
+ Make the DataFrame's column types align with the SQL table
+ column types.
+        Need to work around limited NA value support. Floats are always
+        fine; ints must become floats if there are null values.
+        Booleans are hard because converting a bool column with None replaces
+        all Nones with False. Therefore we only convert bools if there are no
+        NA values.
+ Datetimes should already be converted to np.datetime64 if supported,
+ but here we also force conversion if required.
+ """
+ parse_dates = _process_parse_dates_argument(parse_dates)
+
+ for sql_col in self.table.columns:
+ col_name = sql_col.name
+ try:
+ df_col = self.frame[col_name]
+
+ # Handle date parsing upfront; don't try to convert columns
+ # twice
+ if col_name in parse_dates:
+ try:
+ fmt = parse_dates[col_name]
+ except TypeError:
+ fmt = None
+ self.frame[col_name] = _handle_date_column(df_col, format=fmt)
+ continue
+
+ # the type the dataframe column should have
+ col_type = self._get_dtype(sql_col.type)
+
+ if (
+ col_type is datetime
+ or col_type is date
+ or col_type is DatetimeTZDtype
+ ):
+ # Convert tz-aware Datetime SQL columns to UTC
+ utc = col_type is DatetimeTZDtype
+ self.frame[col_name] = _handle_date_column(df_col, utc=utc)
+ elif dtype_backend == "numpy" and col_type is float:
+ # floats support NA, can always convert!
+ self.frame[col_name] = df_col.astype(col_type, copy=False)
+
+ elif dtype_backend == "numpy" and len(df_col) == df_col.count():
+ # No NA values, can convert ints and bools
+ if col_type is np.dtype("int64") or col_type is bool:
+ self.frame[col_name] = df_col.astype(col_type, copy=False)
+ except KeyError:
+ pass # this column not in results
+
+ def _sqlalchemy_type(self, col: Index | Series):
+ dtype: DtypeArg = self.dtype or {}
+ if is_dict_like(dtype):
+ dtype = cast(dict, dtype)
+ if col.name in dtype:
+ return dtype[col.name]
+
+ # Infer type of column, while ignoring missing values.
+ # Needed for inserting typed data containing NULLs, GH 8778.
+ col_type = lib.infer_dtype(col, skipna=True)
+
+ from sqlalchemy.types import (
+ TIMESTAMP,
+ BigInteger,
+ Boolean,
+ Date,
+ DateTime,
+ Float,
+ Integer,
+ SmallInteger,
+ Text,
+ Time,
+ )
+
+ if col_type in ("datetime64", "datetime"):
+ # GH 9086: TIMESTAMP is the suggested type if the column contains
+ # timezone information
+ try:
+ # error: Item "Index" of "Union[Index, Series]" has no attribute "dt"
+ if col.dt.tz is not None: # type: ignore[union-attr]
+ return TIMESTAMP(timezone=True)
+ except AttributeError:
+ # The column is actually a DatetimeIndex
+ # GH 26761 or an Index with date-like data e.g. 9999-01-01
+ if getattr(col, "tz", None) is not None:
+ return TIMESTAMP(timezone=True)
+ return DateTime
+ if col_type == "timedelta64":
+ warnings.warn(
+ "the 'timedelta' type is not supported, and will be "
+ "written as integer values (ns frequency) to the database.",
+ UserWarning,
+ stacklevel=find_stack_level(),
+ )
+ return BigInteger
+ elif col_type == "floating":
+ if col.dtype == "float32":
+ return Float(precision=23)
+ else:
+ return Float(precision=53)
+ elif col_type == "integer":
+ # GH35076 Map pandas integer to optimal SQLAlchemy integer type
+ if col.dtype.name.lower() in ("int8", "uint8", "int16"):
+ return SmallInteger
+ elif col.dtype.name.lower() in ("uint16", "int32"):
+ return Integer
+ elif col.dtype.name.lower() == "uint64":
+ raise ValueError("Unsigned 64 bit integer datatype is not supported")
+ else:
+ return BigInteger
+ elif col_type == "boolean":
+ return Boolean
+ elif col_type == "date":
+ return Date
+ elif col_type == "time":
+ return Time
+ elif col_type == "complex":
+ raise ValueError("Complex datatypes not supported")
+
+ return Text
+
+ def _get_dtype(self, sqltype):
+ from sqlalchemy.types import (
+ TIMESTAMP,
+ Boolean,
+ Date,
+ DateTime,
+ Float,
+ Integer,
+ )
+
+ if isinstance(sqltype, Float):
+ return float
+ elif isinstance(sqltype, Integer):
+ # TODO: Refine integer size.
+ return np.dtype("int64")
+ elif isinstance(sqltype, TIMESTAMP):
+ # we have a timezone capable type
+ if not sqltype.timezone:
+ return datetime
+ return DatetimeTZDtype
+ elif isinstance(sqltype, DateTime):
+ # Caution: np.datetime64 is also a subclass of np.number.
+ return datetime
+ elif isinstance(sqltype, Date):
+ return date
+ elif isinstance(sqltype, Boolean):
+ return bool
+ return object
+
+
+class PandasSQL(PandasObject, ABC):
+ """
+ Subclasses should define read_query and to_sql.
+ """
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(self, *args) -> None:
+ pass
+
+ def read_table(
+ self,
+ table_name: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ columns=None,
+ schema: str | None = None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ raise NotImplementedError
+
+ @abstractmethod
+ def read_query(
+ self,
+ sql: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ params=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ pass
+
+ @abstractmethod
+ def to_sql(
+ self,
+ frame,
+ name: str,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ index: bool = True,
+ index_label=None,
+ schema=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ engine: str = "auto",
+ **engine_kwargs,
+ ) -> int | None:
+ pass
+
+ @abstractmethod
+ def execute(self, sql: str | Select | TextClause, params=None):
+ pass
+
+ @abstractmethod
+ def has_table(self, name: str, schema: str | None = None) -> bool:
+ pass
+
+ @abstractmethod
+ def _create_sql_schema(
+ self,
+ frame: DataFrame,
+ table_name: str,
+ keys: list[str] | None = None,
+ dtype: DtypeArg | None = None,
+ schema: str | None = None,
+ ) -> str:
+ pass
+
+
+class BaseEngine:
+ def insert_records(
+ self,
+ table: SQLTable,
+ con,
+ frame,
+ name: str,
+ index: bool | str | list[str] | None = True,
+ schema=None,
+ chunksize: int | None = None,
+ method=None,
+ **engine_kwargs,
+ ) -> int | None:
+ """
+ Inserts data into already-prepared table
+ """
+ raise AbstractMethodError(self)
+
+
+class SQLAlchemyEngine(BaseEngine):
+ def __init__(self) -> None:
+ import_optional_dependency(
+ "sqlalchemy", extra="sqlalchemy is required for SQL support."
+ )
+
+ def insert_records(
+ self,
+ table: SQLTable,
+ con,
+ frame,
+ name: str,
+ index: bool | str | list[str] | None = True,
+ schema=None,
+ chunksize: int | None = None,
+ method=None,
+ **engine_kwargs,
+ ) -> int | None:
+ from sqlalchemy import exc
+
+ try:
+ return table.insert(chunksize=chunksize, method=method)
+ except exc.StatementError as err:
+ # GH34431
+ # https://stackoverflow.com/a/67358288/6067848
+ msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
+ )|inf can not be used with MySQL"""
+ err_text = str(err.orig)
+ if re.search(msg, err_text):
+ raise ValueError("inf cannot be used with MySQL") from err
+ raise err
+
+
+def get_engine(engine: str) -> BaseEngine:
+ """return our implementation"""
+ if engine == "auto":
+ engine = get_option("io.sql.engine")
+
+ if engine == "auto":
+ # try engines in this order
+ engine_classes = [SQLAlchemyEngine]
+
+ error_msgs = ""
+ for engine_class in engine_classes:
+ try:
+ return engine_class()
+ except ImportError as err:
+ error_msgs += "\n - " + str(err)
+
+ raise ImportError(
+ "Unable to find a usable engine; "
+ "tried using: 'sqlalchemy'.\n"
+ "A suitable version of "
+ "sqlalchemy is required for sql I/O "
+ "support.\n"
+ "Trying to import the above resulted in these errors:"
+ f"{error_msgs}"
+ )
+
+ if engine == "sqlalchemy":
+ return SQLAlchemyEngine()
+
+ raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
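+
+# A minimal usage sketch of the option lookup above; ``io.sql.engine`` is the
+# registered pandas option this resolver consults:
+#
+#     import pandas as pd
+#     pd.get_option("io.sql.engine")                # "auto" by default
+#     pd.set_option("io.sql.engine", "sqlalchemy")  # pin the engine explicitly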
+
+
+class SQLDatabase(PandasSQL):
+ """
+ This class enables conversion between DataFrame and SQL databases
+ using SQLAlchemy to handle DataBase abstraction.
+
+ Parameters
+ ----------
+ con : SQLAlchemy Connectable or URI string.
+ Connectable to connect with the database. Using SQLAlchemy makes it
+ possible to use any DB supported by that library.
+ schema : string, default None
+ Name of SQL schema in database to write to (if database flavor
+ supports this). If None, use default schema (default).
+ need_transaction : bool, default False
+ If True, SQLDatabase will create a transaction.
+
+ """
+
+ def __init__(
+ self, con, schema: str | None = None, need_transaction: bool = False
+ ) -> None:
+ from sqlalchemy import create_engine
+ from sqlalchemy.engine import Engine
+ from sqlalchemy.schema import MetaData
+
+ # self.exit_stack cleans up the Engine and Connection and commits the
+ # transaction if any of those objects was created below.
+ # Cleanup happens either in self.__exit__ or at the end of the iterator
+ # returned by read_sql when chunksize is not None.
+ self.exit_stack = ExitStack()
+ if isinstance(con, str):
+ con = create_engine(con)
+ self.exit_stack.callback(con.dispose)
+ if isinstance(con, Engine):
+ con = self.exit_stack.enter_context(con.connect())
+ if need_transaction and not con.in_transaction():
+ self.exit_stack.enter_context(con.begin())
+ self.con = con
+ self.meta = MetaData(schema=schema)
+ self.returns_generator = False
+
+ def __exit__(self, *args) -> None:
+ if not self.returns_generator:
+ self.exit_stack.close()
+
+ @contextmanager
+ def run_transaction(self):
+ if not self.con.in_transaction():
+ with self.con.begin():
+ yield self.con
+ else:
+ yield self.con
+
+ def execute(self, sql: str | Select | TextClause, params=None):
+ """Simple passthrough to SQLAlchemy connectable"""
+ args = [] if params is None else [params]
+ if isinstance(sql, str):
+ return self.con.exec_driver_sql(sql, *args)
+ return self.con.execute(sql, *args)
+
+ def read_table(
+ self,
+ table_name: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ columns=None,
+ schema: str | None = None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL database table into a DataFrame.
+
+ Parameters
+ ----------
+ table_name : str
+ Name of SQL table in database.
+ index_col : string, optional, default: None
+ Column to set as index.
+ coerce_float : bool, default True
+ Attempts to convert values of non-string, non-numeric objects
+ (like decimal.Decimal) to floating point. This can result in
+ loss of precision.
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg}``, where the arg corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`.
+ Especially useful with databases without native Datetime support,
+ such as SQLite.
+ columns : list, default: None
+ List of column names to select from SQL table.
+ schema : string, default None
+ Name of SQL schema in database to query (if database flavor
+ supports this). If specified, this overwrites the default
+ schema of the SQL database object.
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number
+ of rows to include in each chunk.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ pandas.read_sql_table
+ SQLDatabase.read_query
+
+ """
+ self.meta.reflect(bind=self.con, only=[table_name], views=True)
+ table = SQLTable(table_name, self, index=index_col, schema=schema)
+ if chunksize is not None:
+ self.returns_generator = True
+ return table.read(
+ self.exit_stack,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ columns=columns,
+ chunksize=chunksize,
+ dtype_backend=dtype_backend,
+ )
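+
+ # A usage sketch through the public wrapper; the engine URL, table and
+ # column names below are illustrative assumptions:
+ #
+ #     import pandas as pd
+ #     from sqlalchemy import create_engine
+ #     eng = create_engine("sqlite:///example.db")
+ #     df = pd.read_sql_table("t", eng, index_col="id",
+ #                            parse_dates={"created": "%Y-%m-%d"})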
+
+ @staticmethod
+ def _query_iterator(
+ result,
+ exit_stack: ExitStack,
+ chunksize: int,
+ columns,
+ index_col=None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ):
+ """Return generator through chunked result set"""
+ has_read_data = False
+ with exit_stack:
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ if not has_read_data:
+ yield _wrap_result(
+ [],
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+ break
+
+ has_read_data = True
+ yield _wrap_result(
+ data,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+
+ def read_query(
+ self,
+ sql: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ params=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL query into a DataFrame.
+
+ Parameters
+ ----------
+ sql : str
+ SQL query to be executed.
+ index_col : string, optional, default: None
+ Column name to use as index for the returned DataFrame object.
+ coerce_float : bool, default True
+ Attempt to convert values of non-string, non-numeric objects (like
+ decimal.Decimal) to floating point, useful for SQL result sets.
+ params : list, tuple or dict, optional, default: None
+ List of parameters to pass to execute method. The syntax used
+ to pass parameters is database driver dependent. Check your
+ database driver documentation for which of the five syntax styles,
+ described in PEP 249's paramstyle, is supported.
+ E.g. psycopg2 uses %(name)s, so pass params={'name' : 'value'}.
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict
+ corresponds to the keyword arguments of
+ :func:`pandas.to_datetime` Especially useful with databases
+ without native Datetime support, such as SQLite.
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number
+ of rows to include in each chunk.
+ dtype : Type name or dict of columns
+ Data type for data or columns. E.g. np.float64 or
+ {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
+
+ .. versionadded:: 1.3.0
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ read_sql_table : Read SQL database table into a DataFrame.
+ read_sql
+
+ """
+ result = self.execute(sql, params)
+ columns = result.keys()
+
+ if chunksize is not None:
+ self.returns_generator = True
+ return self._query_iterator(
+ result,
+ self.exit_stack,
+ chunksize,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+ else:
+ data = result.fetchall()
+ frame = _wrap_result(
+ data,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+ return frame
+
+ read_sql = read_query
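+
+ # A sketch of parameter passing; placeholders follow the driver's paramstyle
+ # as noted in the docstring above. With the stdlib sqlite3 driver (qmark
+ # style) through the public API:
+ #
+ #     import sqlite3
+ #     import pandas as pd
+ #     con = sqlite3.connect(":memory:")
+ #     con.execute("CREATE TABLE t (a INTEGER)")
+ #     con.executemany("INSERT INTO t VALUES (?)", [(1,), (2,)])
+ #     pd.read_sql("SELECT * FROM t WHERE a = ?", con, params=(1,))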
+
+ def prep_table(
+ self,
+ frame,
+ name: str,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ index: bool | str | list[str] | None = True,
+ index_label=None,
+ schema=None,
+ dtype: DtypeArg | None = None,
+ ) -> SQLTable:
+ """
+ Prepares table in the database for data insertion. Creates it if needed, etc.
+ """
+ if dtype:
+ if not is_dict_like(dtype):
+ # error: Value expression in dictionary comprehension has incompatible
+ # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
+ # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
+ # Type[str], Type[float], Type[int], Type[complex], Type[bool],
+ # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
+ # dtype[Any], Type[object]]"
+ dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
+ else:
+ dtype = cast(dict, dtype)
+
+ from sqlalchemy.types import TypeEngine
+
+ for col, my_type in dtype.items():
+ if isinstance(my_type, type) and issubclass(my_type, TypeEngine):
+ pass
+ elif isinstance(my_type, TypeEngine):
+ pass
+ else:
+ raise ValueError(f"The type of {col} is not a SQLAlchemy type")
+
+ table = SQLTable(
+ name,
+ self,
+ frame=frame,
+ index=index,
+ if_exists=if_exists,
+ index_label=index_label,
+ schema=schema,
+ dtype=dtype,
+ )
+ table.create()
+ return table
+
+ def check_case_sensitive(
+ self,
+ name: str,
+ schema: str | None,
+ ) -> None:
+ """
+ Checks table name for issues with case-sensitivity.
+ Method is called after data is inserted.
+ """
+ if not name.isdigit() and not name.islower():
+ # check for potentially case sensitivity issues (GH7815)
+ # Only check when name is not a number and name is not lower case
+ from sqlalchemy import inspect as sqlalchemy_inspect
+
+ insp = sqlalchemy_inspect(self.con)
+ table_names = insp.get_table_names(schema=schema or self.meta.schema)
+ if name not in table_names:
+ msg = (
+ f"The provided table name '{name}' is not found exactly as "
+ "such in the database after writing the table, possibly "
+ "due to case sensitivity issues. Consider using lower "
+ "case table names."
+ )
+ warnings.warn(
+ msg,
+ UserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ def to_sql(
+ self,
+ frame,
+ name: str,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ index: bool = True,
+ index_label=None,
+ schema: str | None = None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ engine: str = "auto",
+ **engine_kwargs,
+ ) -> int | None:
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ frame : DataFrame
+ name : string
+ Name of SQL table.
+ if_exists : {'fail', 'replace', 'append'}, default 'fail'
+ - fail: If table exists, do nothing.
+ - replace: If table exists, drop it, recreate it, and insert data.
+ - append: If table exists, insert data. Create if does not exist.
+ index : boolean, default True
+ Write DataFrame index as a column.
+ index_label : string or sequence, default None
+ Column label for index column(s). If None is given (default) and
+ `index` is True, then the index names are used.
+ A sequence should be given if the DataFrame uses MultiIndex.
+ schema : string, default None
+ Name of SQL schema in database to write to (if database flavor
+ supports this). If specified, this overwrites the default
+ schema of the SQLDatabase object.
+ chunksize : int, default None
+ If not None, then rows will be written in batches of this size at a
+ time. If None, all rows will be written at once.
+ dtype : single type or dict of column name to SQL type, default None
+ Optional specifying the datatype for columns. The SQL type should
+ be a SQLAlchemy type. If all columns are of the same type, one
+ single value can be used.
+ method : {None, 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ * None : Uses standard SQL ``INSERT`` clause (one per row).
+ * 'multi': Pass multiple values in a single ``INSERT`` clause.
+ * callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method `.
+ engine : {'auto', 'sqlalchemy'}, default 'auto'
+ SQL engine library to use. If 'auto', then the option
+ ``io.sql.engine`` is used. The default ``io.sql.engine``
+ behavior is 'sqlalchemy'.
+
+ .. versionadded:: 1.3.0
+
+ **engine_kwargs
+ Any additional kwargs are passed to the engine.
+ """
+ sql_engine = get_engine(engine)
+
+ table = self.prep_table(
+ frame=frame,
+ name=name,
+ if_exists=if_exists,
+ index=index,
+ index_label=index_label,
+ schema=schema,
+ dtype=dtype,
+ )
+
+ total_inserted = sql_engine.insert_records(
+ table=table,
+ con=self.con,
+ frame=frame,
+ name=name,
+ index=index,
+ schema=schema,
+ chunksize=chunksize,
+ method=method,
+ **engine_kwargs,
+ )
+
+ self.check_case_sensitive(name=name, schema=schema)
+ return total_inserted
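+
+ # A sketch of a custom ``method`` callable with the documented signature
+ # (pd_table, conn, keys, data_iter); ``pd_table.table`` is the underlying
+ # SQLAlchemy Table, so this variant batches all rows into one executemany.
+ # ``df`` and ``engine`` are assumed to exist:
+ #
+ #     def insert_all(pd_table, conn, keys, data_iter):
+ #         rows = [dict(zip(keys, row)) for row in data_iter]
+ #         if rows:
+ #             conn.execute(pd_table.table.insert(), rows)
+ #
+ #     df.to_sql("t", engine, method=insert_all)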
+
+ @property
+ def tables(self):
+ return self.meta.tables
+
+ def has_table(self, name: str, schema: str | None = None) -> bool:
+ from sqlalchemy import inspect as sqlalchemy_inspect
+
+ insp = sqlalchemy_inspect(self.con)
+ return insp.has_table(name, schema or self.meta.schema)
+
+ def get_table(self, table_name: str, schema: str | None = None) -> Table:
+ from sqlalchemy import (
+ Numeric,
+ Table,
+ )
+
+ schema = schema or self.meta.schema
+ tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema)
+ for column in tbl.columns:
+ if isinstance(column.type, Numeric):
+ column.type.asdecimal = False
+ return tbl
+
+ def drop_table(self, table_name: str, schema: str | None = None) -> None:
+ schema = schema or self.meta.schema
+ if self.has_table(table_name, schema):
+ self.meta.reflect(
+ bind=self.con, only=[table_name], schema=schema, views=True
+ )
+ with self.run_transaction():
+ self.get_table(table_name, schema).drop(bind=self.con)
+ self.meta.clear()
+
+ def _create_sql_schema(
+ self,
+ frame: DataFrame,
+ table_name: str,
+ keys: list[str] | None = None,
+ dtype: DtypeArg | None = None,
+ schema: str | None = None,
+ ) -> str:
+ table = SQLTable(
+ table_name,
+ self,
+ frame=frame,
+ index=False,
+ keys=keys,
+ dtype=dtype,
+ schema=schema,
+ )
+ return str(table.sql_schema())
+
+
+# ---- SQL without SQLAlchemy ---
+
+
+class ADBCDatabase(PandasSQL):
+ """
+ This class enables conversion between DataFrame and SQL databases
+ using ADBC to handle DataBase abstraction.
+
+ Parameters
+ ----------
+ con : adbc_driver_manager.dbapi.Connection
+ """
+
+ def __init__(self, con) -> None:
+ self.con = con
+
+ @contextmanager
+ def run_transaction(self):
+ with self.con.cursor() as cur:
+ try:
+ yield cur
+ except Exception:
+ self.con.rollback()
+ raise
+ self.con.commit()
+
+ def execute(self, sql: str | Select | TextClause, params=None):
+ if not isinstance(sql, str):
+ raise TypeError("Query must be a string unless using sqlalchemy.")
+ args = [] if params is None else [params]
+ cur = self.con.cursor()
+ try:
+ cur.execute(sql, *args)
+ return cur
+ except Exception as exc:
+ try:
+ self.con.rollback()
+ except Exception as inner_exc: # pragma: no cover
+ ex = DatabaseError(
+ f"Execution failed on sql: {sql}\n{exc}\nunable to rollback"
+ )
+ raise ex from inner_exc
+
+ ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}")
+ raise ex from exc
+
+ def read_table(
+ self,
+ table_name: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ columns=None,
+ schema: str | None = None,
+ chunksize: int | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL database table into a DataFrame.
+
+ Parameters
+ ----------
+ table_name : str
+ Name of SQL table in database.
+ coerce_float : bool, default True
+ Raises NotImplementedError
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg}``, where the arg corresponds
+ to the keyword arguments of :func:`pandas.to_datetime`.
+ Especially useful with databases without native Datetime support,
+ such as SQLite.
+ columns : list, default: None
+ List of column names to select from SQL table.
+ schema : string, default None
+ Name of SQL schema in database to query (if database flavor
+ supports this). If specified, this overwrites the default
+ schema of the SQL database object.
+ chunksize : int, default None
+ Raises NotImplementedError
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ pandas.read_sql_table
+ SQLDatabase.read_query
+
+ """
+ if coerce_float is not True:
+ raise NotImplementedError(
+ "'coerce_float' is not implemented for ADBC drivers"
+ )
+ if chunksize:
+ raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")
+
+ if columns:
+ if index_col:
+ index_select = maybe_make_list(index_col)
+ else:
+ index_select = []
+ to_select = index_select + columns
+ select_list = ", ".join(f'"{x}"' for x in to_select)
+ else:
+ select_list = "*"
+ if schema:
+ stmt = f"SELECT {select_list} FROM {schema}.{table_name}"
+ else:
+ stmt = f"SELECT {select_list} FROM {table_name}"
+
+ mapping: type[ArrowDtype] | None | Callable
+ if dtype_backend == "pyarrow":
+ mapping = ArrowDtype
+ elif dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping().get
+ elif using_pyarrow_string_dtype():
+ from pandas.io._util import arrow_string_types_mapper
+
+ mapping = arrow_string_types_mapper()
+ else:
+ mapping = None
+
+ with self.con.cursor() as cur:
+ cur.execute(stmt)
+ df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping)
+
+ return _wrap_result_adbc(
+ df,
+ index_col=index_col,
+ parse_dates=parse_dates,
+ )
+
+ def read_query(
+ self,
+ sql: str,
+ index_col: str | list[str] | None = None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ params=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ """
+ Read SQL query into a DataFrame.
+
+ Parameters
+ ----------
+ sql : str
+ SQL query to be executed.
+ index_col : string, optional, default: None
+ Column name to use as index for the returned DataFrame object.
+ coerce_float : bool, default True
+ Raises NotImplementedError
+ params : list, tuple or dict, optional, default: None
+ Raises NotImplementedError
+ parse_dates : list or dict, default: None
+ - List of column names to parse as dates.
+ - Dict of ``{column_name: format string}`` where format string is
+ strftime compatible in case of parsing string times, or is one of
+ (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict
+ corresponds to the keyword arguments of
+ :func:`pandas.to_datetime` Especially useful with databases
+ without native Datetime support, such as SQLite.
+ chunksize : int, default None
+ Raises NotImplementedError
+ dtype : Type name or dict of columns
+ Data type for data or columns. E.g. np.float64 or
+ {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
+
+ .. versionadded:: 1.3.0
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ read_sql_table : Read SQL database table into a DataFrame.
+ read_sql
+
+ """
+ if coerce_float is not True:
+ raise NotImplementedError(
+ "'coerce_float' is not implemented for ADBC drivers"
+ )
+ if params:
+ raise NotImplementedError("'params' is not implemented for ADBC drivers")
+ if chunksize:
+ raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")
+
+ mapping: type[ArrowDtype] | None | Callable
+ if dtype_backend == "pyarrow":
+ mapping = ArrowDtype
+ elif dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping().get
+ else:
+ mapping = None
+
+ with self.con.cursor() as cur:
+ cur.execute(sql)
+ df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping)
+
+ return _wrap_result_adbc(
+ df,
+ index_col=index_col,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ )
+
+ read_sql = read_query
+
+ def to_sql(
+ self,
+ frame,
+ name: str,
+ if_exists: Literal["fail", "replace", "append"] = "fail",
+ index: bool = True,
+ index_label=None,
+ schema: str | None = None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ engine: str = "auto",
+ **engine_kwargs,
+ ) -> int | None:
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ frame : DataFrame
+ name : string
+ Name of SQL table.
+ if_exists : {'fail', 'replace', 'append'}, default 'fail'
+ - fail: If table exists, do nothing.
+ - replace: If table exists, drop it, recreate it, and insert data.
+ - append: If table exists, insert data. Create if does not exist.
+ index : boolean, default True
+ Write DataFrame index as a column.
+ index_label : string or sequence, default None
+ Raises NotImplementedError
+ schema : string, default None
+ Name of SQL schema in database to write to (if database flavor
+ supports this). If specified, this overwrites the default
+ schema of the SQLDatabase object.
+ chunksize : int, default None
+ Raises NotImplementedError
+ dtype : single type or dict of column name to SQL type, default None
+ Raises NotImplementedError
+ method : {None, 'multi', callable}, default None
+ Raises NotImplementedError
+ engine : {'auto', 'sqlalchemy'}, default 'auto'
+ Raises NotImplementedError if not set to 'auto'
+ """
+ if index_label:
+ raise NotImplementedError(
+ "'index_label' is not implemented for ADBC drivers"
+ )
+ if chunksize:
+ raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")
+ if dtype:
+ raise NotImplementedError("'dtype' is not implemented for ADBC drivers")
+ if method:
+ raise NotImplementedError("'method' is not implemented for ADBC drivers")
+ if engine != "auto":
+ raise NotImplementedError(
+ "engine != 'auto' not implemented for ADBC drivers"
+ )
+
+ if schema:
+ table_name = f"{schema}.{name}"
+ else:
+ table_name = name
+
+ # pandas if_exists="append" will still create the table if it
+ # does not exist; ADBC is more explicit, with separate "append" and
+ # "create" modes, so the semantics do not map exactly across
+ # the two libraries
+ mode = "create"
+ if self.has_table(name, schema):
+ if if_exists == "fail":
+ raise ValueError(f"Table '{table_name}' already exists.")
+ elif if_exists == "replace":
+ with self.con.cursor() as cur:
+ cur.execute(f"DROP TABLE {table_name}")
+ elif if_exists == "append":
+ mode = "append"
+
+ import pyarrow as pa
+
+ try:
+ tbl = pa.Table.from_pandas(frame, preserve_index=index)
+ except pa.ArrowNotImplementedError as exc:
+ raise ValueError("datatypes not supported") from exc
+
+ with self.con.cursor() as cur:
+ total_inserted = cur.adbc_ingest(
+ table_name=name, data=tbl, mode=mode, db_schema_name=schema
+ )
+
+ self.con.commit()
+ return total_inserted
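+
+ # A usage sketch with an ADBC connection, assuming the adbc-driver-sqlite
+ # package; other ADBC drivers follow the same DBAPI pattern:
+ #
+ #     import pandas as pd
+ #     from adbc_driver_sqlite import dbapi
+ #     with dbapi.connect(":memory:") as con:
+ #         pd.DataFrame({"a": [1, 2]}).to_sql("t", con, index=False)
+ #         pd.read_sql("SELECT * FROM t", con)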
+
+ def has_table(self, name: str, schema: str | None = None) -> bool:
+ meta = self.con.adbc_get_objects(
+ db_schema_filter=schema, table_name_filter=name
+ ).read_all()
+
+ for catalog_schema in meta["catalog_db_schemas"].to_pylist():
+ if not catalog_schema:
+ continue
+ for schema_record in catalog_schema:
+ if not schema_record:
+ continue
+
+ for table_record in schema_record["db_schema_tables"]:
+ if table_record["table_name"] == name:
+ return True
+
+ return False
+
+ def _create_sql_schema(
+ self,
+ frame: DataFrame,
+ table_name: str,
+ keys: list[str] | None = None,
+ dtype: DtypeArg | None = None,
+ schema: str | None = None,
+ ) -> str:
+ raise NotImplementedError("not implemented for adbc")
+
+
+# sqlite-specific sql strings and handler class
+# dictionary used for readability purposes
+_SQL_TYPES = {
+ "string": "TEXT",
+ "floating": "REAL",
+ "integer": "INTEGER",
+ "datetime": "TIMESTAMP",
+ "date": "DATE",
+ "time": "TIME",
+ "boolean": "INTEGER",
+}
+
+
+def _get_unicode_name(name: object):
+ try:
+ uname = str(name).encode("utf-8", "strict").decode("utf-8")
+ except UnicodeError as err:
+ raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
+ return uname
+
+
+def _get_valid_sqlite_name(name: object):
+ # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
+ # -for-sqlite-table-column-names-in-python
+ # Ensure the string can be encoded as UTF-8.
+ # Ensure the string does not include any NUL characters.
+ # Replace all " with "".
+ # Wrap the entire thing in double quotes.
+
+ uname = _get_unicode_name(name)
+ if not len(uname):
+ raise ValueError("Empty table or column name specified")
+
+ nul_index = uname.find("\x00")
+ if nul_index >= 0:
+ raise ValueError("SQLite identifier cannot contain NULs")
+ return '"' + uname.replace('"', '""') + '"'
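+
+# For example, per the escaping rules above (a quick sketch):
+#
+#     _get_valid_sqlite_name("select")  # -> '"select"'
+#     _get_valid_sqlite_name('a"b')     # -> '"a""b"'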
+
+
+class SQLiteTable(SQLTable):
+ """
+ Patch the SQLTable for fallback support.
+ Instead of a table variable just use the Create Table statement.
+ """
+
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+
+ self._register_date_adapters()
+
+ def _register_date_adapters(self) -> None:
+ # GH 8341
+ # register an adapter callable for datetime.time object
+ import sqlite3
+
+ # this will transform time(12,34,56,789) into '12:34:56.000789'
+ # (this is what sqlalchemy does)
+ def _adapt_time(t) -> str:
+ # This is faster than strftime
+ return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}"
+
+ # Also register adapters for date/datetime and co
+ # xref https://docs.python.org/3.12/library/sqlite3.html#adapter-and-converter-recipes
+ # Python 3.12+ doesn't auto-register adapters for us anymore
+
+ adapt_date_iso = lambda val: val.isoformat()
+ adapt_datetime_iso = lambda val: val.isoformat(" ")
+
+ sqlite3.register_adapter(time, _adapt_time)
+
+ sqlite3.register_adapter(date, adapt_date_iso)
+ sqlite3.register_adapter(datetime, adapt_datetime_iso)
+
+ convert_date = lambda val: date.fromisoformat(val.decode())
+ convert_timestamp = lambda val: datetime.fromisoformat(val.decode())
+
+ sqlite3.register_converter("date", convert_date)
+ sqlite3.register_converter("timestamp", convert_timestamp)
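+
+ # With these adapters and converters registered, values round-trip as ISO
+ # text, e.g. (sketch): date(2024, 1, 2) is stored as '2024-01-02',
+ # datetime(2024, 1, 2, 3, 4, 5) as '2024-01-02 03:04:05', and
+ # time(12, 34, 56, 789) as '12:34:56.000789' via _adapt_time above.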
+
+ def sql_schema(self) -> str:
+ return str(";\n".join(self.table))
+
+ def _execute_create(self) -> None:
+ with self.pd_sql.run_transaction() as conn:
+ for stmt in self.table:
+ conn.execute(stmt)
+
+ def insert_statement(self, *, num_rows: int) -> str:
+ names = list(map(str, self.frame.columns))
+ wld = "?" # wildcard char
+ escape = _get_valid_sqlite_name
+
+ if self.index is not None:
+ for idx in self.index[::-1]:
+ names.insert(0, idx)
+
+ bracketed_names = [escape(column) for column in names]
+ col_names = ",".join(bracketed_names)
+
+ row_wildcards = ",".join([wld] * len(names))
+ wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
+ insert_statement = (
+ f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
+ )
+ return insert_statement
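+
+ # For a frame with columns "a" and "b", no index written, table name "t",
+ # and num_rows=2, this produces (sketch):
+ #
+ #     INSERT INTO "t" ("a","b") VALUES (?,?),(?,?)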
+
+ def _execute_insert(self, conn, keys, data_iter) -> int:
+ data_list = list(data_iter)
+ conn.executemany(self.insert_statement(num_rows=1), data_list)
+ return conn.rowcount
+
+ def _execute_insert_multi(self, conn, keys, data_iter) -> int:
+ data_list = list(data_iter)
+ flattened_data = [x for row in data_list for x in row]
+ conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
+ return conn.rowcount
+
+ def _create_table_setup(self):
+ """
+ Return a list of SQL statements that creates a table reflecting the
+ structure of a DataFrame. The first entry will be a CREATE TABLE
+ statement while the rest will be CREATE INDEX statements.
+ """
+ column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
+ escape = _get_valid_sqlite_name
+
+ create_tbl_stmts = [
+ escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
+ ]
+
+ if self.keys is not None and len(self.keys):
+ if not is_list_like(self.keys):
+ keys = [self.keys]
+ else:
+ keys = self.keys
+ cnames_br = ", ".join([escape(c) for c in keys])
+ create_tbl_stmts.append(
+ f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
+ )
+ if self.schema:
+ schema_name = self.schema + "."
+ else:
+ schema_name = ""
+ create_stmts = [
+ "CREATE TABLE "
+ + schema_name
+ + escape(self.name)
+ + " (\n"
+ + ",\n ".join(create_tbl_stmts)
+ + "\n)"
+ ]
+
+ ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
+ if len(ix_cols):
+ cnames = "_".join(ix_cols)
+ cnames_br = ",".join([escape(c) for c in ix_cols])
+ create_stmts.append(
+ "CREATE INDEX "
+ + escape("ix_" + self.name + "_" + cnames)
+ + " ON "
+ + escape(self.name)
+ + " ("
+ + cnames_br
+ + ")"
+ )
+
+ return create_stmts
+
+ def _sql_type_name(self, col):
+ dtype: DtypeArg = self.dtype or {}
+ if is_dict_like(dtype):
+ dtype = cast(dict, dtype)
+ if col.name in dtype:
+ return dtype[col.name]
+
+ # Infer type of column, while ignoring missing values.
+ # Needed for inserting typed data containing NULLs, GH 8778.
+ col_type = lib.infer_dtype(col, skipna=True)
+
+ if col_type == "timedelta64":
+ warnings.warn(
+ "the 'timedelta' type is not supported, and will be "
+ "written as integer values (ns frequency) to the database.",
+ UserWarning,
+ stacklevel=find_stack_level(),
+ )
+ col_type = "integer"
+
+ elif col_type == "datetime64":
+ col_type = "datetime"
+
+ elif col_type == "empty":
+ col_type = "string"
+
+ elif col_type == "complex":
+ raise ValueError("Complex datatypes not supported")
+
+ if col_type not in _SQL_TYPES:
+ col_type = "string"
+
+ return _SQL_TYPES[col_type]
+
+
+class SQLiteDatabase(PandasSQL):
+ """
+ Version of SQLDatabase to support SQLite connections (fallback without
+ SQLAlchemy). This should only be used internally.
+
+ Parameters
+ ----------
+ con : sqlite connection object
+
+ """
+
+ def __init__(self, con) -> None:
+ self.con = con
+
+ @contextmanager
+ def run_transaction(self):
+ cur = self.con.cursor()
+ try:
+ yield cur
+ self.con.commit()
+ except Exception:
+ self.con.rollback()
+ raise
+ finally:
+ cur.close()
+
+ def execute(self, sql: str | Select | TextClause, params=None):
+ if not isinstance(sql, str):
+ raise TypeError("Query must be a string unless using sqlalchemy.")
+ args = [] if params is None else [params]
+ cur = self.con.cursor()
+ try:
+ cur.execute(sql, *args)
+ return cur
+ except Exception as exc:
+ try:
+ self.con.rollback()
+ except Exception as inner_exc: # pragma: no cover
+ ex = DatabaseError(
+ f"Execution failed on sql: {sql}\n{exc}\nunable to rollback"
+ )
+ raise ex from inner_exc
+
+ ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}")
+ raise ex from exc
+
+ @staticmethod
+ def _query_iterator(
+ cursor,
+ chunksize: int,
+ columns,
+ index_col=None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ):
+ """Return generator through chunked result set"""
+ has_read_data = False
+ while True:
+ data = cursor.fetchmany(chunksize)
+ if type(data) == tuple:
+ data = list(data)
+ if not data:
+ cursor.close()
+ if not has_read_data:
+ result = DataFrame.from_records(
+ [], columns=columns, coerce_float=coerce_float
+ )
+ if dtype:
+ result = result.astype(dtype)
+ yield result
+ break
+
+ has_read_data = True
+ yield _wrap_result(
+ data,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+
+ def read_query(
+ self,
+ sql,
+ index_col=None,
+ coerce_float: bool = True,
+ parse_dates=None,
+ params=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+ ) -> DataFrame | Iterator[DataFrame]:
+ cursor = self.execute(sql, params)
+ columns = [col_desc[0] for col_desc in cursor.description]
+
+ if chunksize is not None:
+ return self._query_iterator(
+ cursor,
+ chunksize,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+ else:
+ data = self._fetchall_as_list(cursor)
+ cursor.close()
+
+ frame = _wrap_result(
+ data,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ dtype_backend=dtype_backend,
+ )
+ return frame
+
+ def _fetchall_as_list(self, cur):
+ result = cur.fetchall()
+ if not isinstance(result, list):
+ result = list(result)
+ return result
+
+ def to_sql(
+ self,
+ frame,
+ name: str,
+ if_exists: str = "fail",
+ index: bool = True,
+ index_label=None,
+ schema=None,
+ chunksize: int | None = None,
+ dtype: DtypeArg | None = None,
+ method: Literal["multi"] | Callable | None = None,
+ engine: str = "auto",
+ **engine_kwargs,
+ ) -> int | None:
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ frame: DataFrame
+ name: string
+ Name of SQL table.
+ if_exists: {'fail', 'replace', 'append'}, default 'fail'
+ fail: If table exists, do nothing.
+ replace: If table exists, drop it, recreate it, and insert data.
+ append: If table exists, insert data. Create if it does not exist.
+ index : bool, default True
+ Write DataFrame index as a column
+ index_label : string or sequence, default None
+ Column label for index column(s). If None is given (default) and
+ `index` is True, then the index names are used.
+ A sequence should be given if the DataFrame uses MultiIndex.
+ schema : string, default None
+ Ignored parameter included for compatibility with SQLAlchemy
+ version of ``to_sql``.
+ chunksize : int, default None
+ If not None, then rows will be written in batches of this
+ size at a time. If None, all rows will be written at once.
+ dtype : single type or dict of column name to SQL type, default None
+ Optional specifying the datatype for columns. The SQL type should
+ be a string. If all columns are of the same type, one single value
+ can be used.
+ method : {None, 'multi', callable}, default None
+ Controls the SQL insertion clause used:
+
+ * None : Uses standard SQL ``INSERT`` clause (one per row).
+ * 'multi': Pass multiple values in a single ``INSERT`` clause.
+ * callable with signature ``(pd_table, conn, keys, data_iter)``.
+
+ Details and a sample callable implementation can be found in the
+ section :ref:`insert method `.
+ """
+ if dtype:
+ if not is_dict_like(dtype):
+ # error: Value expression in dictionary comprehension has incompatible
+ # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
+ # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
+ # Type[str], Type[float], Type[int], Type[complex], Type[bool],
+ # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
+ # dtype[Any], Type[object]]"
+ dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
+ else:
+ dtype = cast(dict, dtype)
+
+ for col, my_type in dtype.items():
+ if not isinstance(my_type, str):
+ raise ValueError(f"{col} ({my_type}) not a string")
+
+ table = SQLiteTable(
+ name,
+ self,
+ frame=frame,
+ index=index,
+ if_exists=if_exists,
+ index_label=index_label,
+ dtype=dtype,
+ )
+ table.create()
+ return table.insert(chunksize, method)
+
+ def has_table(self, name: str, schema: str | None = None) -> bool:
+ wld = "?"
+ query = f"""
+ SELECT
+ name
+ FROM
+ sqlite_master
+ WHERE
+ type IN ('table', 'view')
+ AND name={wld};
+ """
+
+ return len(self.execute(query, [name]).fetchall()) > 0
+
+ def get_table(self, table_name: str, schema: str | None = None) -> None:
+ return None # not supported in fallback mode
+
+ def drop_table(self, name: str, schema: str | None = None) -> None:
+ drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
+ self.execute(drop_sql)
+
+ def _create_sql_schema(
+ self,
+ frame,
+ table_name: str,
+ keys=None,
+ dtype: DtypeArg | None = None,
+ schema: str | None = None,
+ ) -> str:
+ table = SQLiteTable(
+ table_name,
+ self,
+ frame=frame,
+ index=False,
+ keys=keys,
+ dtype=dtype,
+ schema=schema,
+ )
+ return str(table.sql_schema())
+
+
+def get_schema(
+ frame,
+ name: str,
+ keys=None,
+ con=None,
+ dtype: DtypeArg | None = None,
+ schema: str | None = None,
+) -> str:
+ """
+ Get the SQL db table schema for the given frame.
+
+ Parameters
+ ----------
+ frame : DataFrame
+ name : str
+ name of SQL table
+ keys : string or sequence, default: None
+ columns to use a primary key
+ con: ADBC Connection, SQLAlchemy connectable, sqlite3 connection, default: None
+ ADBC provides high performance I/O with native type support, where available.
+ Using SQLAlchemy makes it possible to use any DB supported by that
+ library
+ If a DBAPI2 object, only sqlite3 is supported.
+ dtype : dict of column name to SQL type, default None
+ Optional specifying the datatype for columns. The SQL type should
+ be a SQLAlchemy type, or a string for sqlite3 fallback connection.
+ schema: str, default: None
+ Optional specifying the schema to be used in creating the table.
+ """
+ with pandasSQL_builder(con=con) as pandas_sql:
+ return pandas_sql._create_sql_schema(
+ frame, name, keys=keys, dtype=dtype, schema=schema
+ )
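+
+# A minimal usage sketch with the sqlite3 fallback (string SQL types):
+#
+#     import sqlite3
+#     import pandas as pd
+#     from pandas.io.sql import get_schema
+#     df = pd.DataFrame({"a": [1], "b": [1.5]})
+#     get_schema(df, "t", con=sqlite3.connect(":memory:"))
+#     # returns a CREATE TABLE "t" statement mapping "a" -> INTEGER, "b" -> REAL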
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac497cd266027f7af71884996182e1725baba361
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py
@@ -0,0 +1,1177 @@
+"""
+:mod:`pandas.io.xml` is a module for reading XML.
+"""
+
+from __future__ import annotations
+
+import io
+from os import PathLike
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+)
+import warnings
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ AbstractMethodError,
+ ParserError,
+)
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import is_list_like
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+ file_exists,
+ get_handle,
+ infer_compression,
+ is_file_like,
+ is_fsspec_url,
+ is_url,
+ stringify_path,
+)
+from pandas.io.parsers import TextParser
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+ from xml.etree.ElementTree import Element
+
+ from lxml import etree
+
+ from pandas._typing import (
+ CompressionOptions,
+ ConvertersArg,
+ DtypeArg,
+ DtypeBackend,
+ FilePath,
+ ParseDatesArg,
+ ReadBuffer,
+ StorageOptions,
+ XMLParsers,
+ )
+
+ from pandas import DataFrame
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",
+)
+class _XMLFrameParser:
+ """
+ Internal subclass to parse XML into DataFrames.
+
+ Parameters
+ ----------
+ path_or_buffer : a valid XML ``str``, path object or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, and file.
+
+ xpath : str or regex
+ The ``XPath`` expression to parse required set of nodes for
+ migration to :class:`~pandas.DataFrame`. ``etree`` supports limited ``XPath``.
+
+ namespaces : dict
+ The namespaces defined in XML document (``xmlns:namespace='URI'``)
+ as dicts with key being namespace and value the URI.
+
+ elems_only : bool
+ Parse only the child elements at the specified ``xpath``.
+
+ attrs_only : bool
+ Parse only the attributes at the specified ``xpath``.
+
+ names : list
+ Column names for :class:`~pandas.DataFrame` of parsed XML data.
+
+ dtype : dict
+ Data type for data or columns. E.g. {{'a': np.float64,
+ 'b': np.int32, 'c': 'Int64'}}
+
+ .. versionadded:: 1.5.0
+
+ converters : dict, optional
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels.
+
+ .. versionadded:: 1.5.0
+
+ parse_dates : bool or list of int or names or list of lists or dict
+ Converts either index or select columns to datetimes
+
+ .. versionadded:: 1.5.0
+
+ encoding : str
+ Encoding of xml object or document.
+
+ stylesheet : str or file-like
+ URL, file, file-like object, or a raw string containing XSLT,
+ ``etree`` does not support XSLT but retained for consistency.
+
+ iterparse : dict, optional
+ Dict with row element as key and list of descendant elements
+ and/or attributes as value to be retrieved in iterparsing of
+ XML document.
+
+ .. versionadded:: 1.5.0
+
+ {decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ {storage_options}
+
+ See also
+ --------
+ pandas.io.xml._EtreeFrameParser
+ pandas.io.xml._LxmlFrameParser
+
+ Notes
+ -----
+ To subclass this class effectively you must override the following methods:
+ * :func:`parse_data`
+ * :func:`_parse_nodes`
+ * :func:`_iterparse_nodes`
+ * :func:`_parse_doc`
+ * :func:`_validate_names`
+ * :func:`_validate_path`
+
+
+ See each method's respective documentation for details on their
+ functionality.
+ """
+
+ def __init__(
+ self,
+ path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
+ xpath: str,
+ namespaces: dict[str, str] | None,
+ elems_only: bool,
+ attrs_only: bool,
+ names: Sequence[str] | None,
+ dtype: DtypeArg | None,
+ converters: ConvertersArg | None,
+ parse_dates: ParseDatesArg | None,
+ encoding: str | None,
+ stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,
+ iterparse: dict[str, list[str]] | None,
+ compression: CompressionOptions,
+ storage_options: StorageOptions,
+ ) -> None:
+ self.path_or_buffer = path_or_buffer
+ self.xpath = xpath
+ self.namespaces = namespaces
+ self.elems_only = elems_only
+ self.attrs_only = attrs_only
+ self.names = names
+ self.dtype = dtype
+ self.converters = converters
+ self.parse_dates = parse_dates
+ self.encoding = encoding
+ self.stylesheet = stylesheet
+ self.iterparse = iterparse
+ self.is_style = None
+ self.compression: CompressionOptions = compression
+ self.storage_options = storage_options
+
+ def parse_data(self) -> list[dict[str, str | None]]:
+ """
+ Parse xml data.
+
+ This method will call the other internal methods to
+ validate ``xpath``, names, parse and return specific nodes.
+ """
+
+ raise AbstractMethodError(self)
+
+ def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:
+ """
+ Parse xml nodes.
+
+ This method will parse the children and attributes of elements
+ in ``xpath``, conditionally for only elements, only attributes
+ or both while optionally renaming node names.
+
+ Raises
+ ------
+ ValueError
+ * If only elements and only attributes are specified.
+
+ Notes
+ -----
+ Namespace URIs will be removed from return node values. Also,
+ elements with missing children or attributes compared to siblings
+ will have optional keys filled with None values.
+ """
+
+ dicts: list[dict[str, str | None]]
+
+ if self.elems_only and self.attrs_only:
+ raise ValueError("Either elements or attributes can be parsed, not both.")
+ if self.elems_only:
+ if self.names:
+ dicts = [
+ {
+ **(
+ {el.tag: el.text}
+ if el.text and not el.text.isspace()
+ else {}
+ ),
+ **{
+ nm: ch.text if ch.text else None
+ for nm, ch in zip(self.names, el.findall("*"))
+ },
+ }
+ for el in elems
+ ]
+ else:
+ dicts = [
+ {ch.tag: ch.text if ch.text else None for ch in el.findall("*")}
+ for el in elems
+ ]
+
+ elif self.attrs_only:
+ dicts = [
+ {k: v if v else None for k, v in el.attrib.items()} for el in elems
+ ]
+
+ elif self.names:
+ dicts = [
+ {
+ **el.attrib,
+ **({el.tag: el.text} if el.text and not el.text.isspace() else {}),
+ **{
+ nm: ch.text if ch.text else None
+ for nm, ch in zip(self.names, el.findall("*"))
+ },
+ }
+ for el in elems
+ ]
+
+ else:
+ dicts = [
+ {
+ **el.attrib,
+ **({el.tag: el.text} if el.text and not el.text.isspace() else {}),
+ **{ch.tag: ch.text if ch.text else None for ch in el.findall("*")},
+ }
+ for el in elems
+ ]
+
+ dicts = [
+ {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts
+ ]
+
+ keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
+ dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
+
+ if self.names:
+ dicts = [dict(zip(self.names, d.values())) for d in dicts]
+
+ return dicts
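+
+ # A sketch of the flattening performed above: a row element such as
+ #
+ #     <row id="1"><shape>square</shape><sides>4</sides></row>
+ #
+ # becomes {"id": "1", "shape": "square", "sides": "4"}, and a sibling row
+ # missing <sides> gets that key filled with None.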
+
+ def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:
+ """
+ Iterparse xml nodes.
+
+ This method will read decompressed XML files on local disk for elements
+ and underlying descendants using iterparse, a method that iterates
+ through an XML tree without holding the entire tree in memory.
+
+ Raises
+ ------
+ TypeError
+ * If ``iterparse`` is not a dict or its dict value is not list-like.
+ ParserError
+ * If ``path_or_buffer`` is not a physical file on disk or file-like object.
+ * If no data is returned from selected items in ``iterparse``.
+
+ Notes
+ -----
+ Namespace URIs will be removed from return node values. Also,
+ elements with missing children or attributes in submitted list
+ will have optional keys filled with None values.
+ """
+
+ dicts: list[dict[str, str | None]] = []
+ row: dict[str, str | None] | None = None
+
+ if not isinstance(self.iterparse, dict):
+ raise TypeError(
+ f"{type(self.iterparse).__name__} is not a valid type for iterparse"
+ )
+
+ row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""
+ if not is_list_like(self.iterparse[row_node]):
+ raise TypeError(
+ f"{type(self.iterparse[row_node])} is not a valid type "
+ "for value in iterparse"
+ )
+
+ if (not hasattr(self.path_or_buffer, "read")) and (
+ not isinstance(self.path_or_buffer, (str, PathLike))
+ or is_url(self.path_or_buffer)
+ or is_fsspec_url(self.path_or_buffer)
+ or (
+ isinstance(self.path_or_buffer, str)
+ and self.path_or_buffer.startswith(("<?xml", "<"))
+ # ... (remainder of the check and the iterparse traversal elided; per
+ # the docstring above, the method raises ParserError for inputs that
+ # are not local files or file-like objects and returns ``dicts``) ...
+
+ def _validate_path(self) -> list[Any]:
+ """
+ Validate ``xpath``.
+
+ This method checks for syntax, evaluation, or empty nodes return.
+
+ Raises
+ ------
+ SyntaxError
+ * If xpath is not supported or there are issues with namespaces.
+
+ ValueError
+ * If xpath does not return any nodes.
+ """
+
+ raise AbstractMethodError(self)
+
+ def _validate_names(self) -> None:
+ """
+ Validate names.
+
+ This method will check if names is a list-like and aligns
+ with length of parse nodes.
+
+ Raises
+ ------
+ ValueError
+ * If names is list-like but shorter than the number of child nodes.
+ """
+ raise AbstractMethodError(self)
+
+ def _parse_doc(
+ self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
+ ) -> Element | etree._Element:
+ """
+ Build tree from path_or_buffer.
+
+ This method will parse XML object into tree
+ either from string/bytes or file location.
+ """
+ raise AbstractMethodError(self)
+
+
+class _EtreeFrameParser(_XMLFrameParser):
+ """
+ Internal class to parse XML into DataFrames with the Python
+ standard library XML module: `xml.etree.ElementTree`.
+ """
+
+ def parse_data(self) -> list[dict[str, str | None]]:
+ from xml.etree.ElementTree import iterparse
+
+ if self.stylesheet is not None:
+ raise ValueError(
+ "To use stylesheet, you need lxml installed and selected as parser."
+ )
+
+ if self.iterparse is None:
+ self.xml_doc = self._parse_doc(self.path_or_buffer)
+ elems = self._validate_path()
+
+ self._validate_names()
+
+ xml_dicts: list[dict[str, str | None]] = (
+ self._parse_nodes(elems)
+ if self.iterparse is None
+ else self._iterparse_nodes(iterparse)
+ )
+
+ return xml_dicts
+
+ def _validate_path(self) -> list[Any]:
+ """
+ Notes
+ -----
+ ``etree`` supports limited ``XPath``. If the user attempts a more complex
+ expression, a syntax error will be raised.
+ """
+
+ msg = (
+ "xpath does not return any nodes or attributes. "
+ "Be sure to specify in `xpath` the parent nodes of "
+ "children and attributes to parse. "
+ "If document uses namespaces denoted with "
+ "xmlns, be sure to define namespaces and "
+ "use them in xpath."
+ )
+ try:
+ elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
+ children = [ch for el in elems for ch in el.findall("*")]
+ attrs = {k: v for el in elems for k, v in el.attrib.items()}
+
+ if elems is None:
+ raise ValueError(msg)
+
+ if elems is not None:
+ if self.elems_only and children == []:
+ raise ValueError(msg)
+ if self.attrs_only and attrs == {}:
+ raise ValueError(msg)
+ if children == [] and attrs == {}:
+ raise ValueError(msg)
+
+ except (KeyError, SyntaxError):
+ raise SyntaxError(
+ "You have used an incorrect or unsupported XPath "
+ "expression for etree library or you used an "
+ "undeclared namespace prefix."
+ )
+
+ return elems
+
+ def _validate_names(self) -> None:
+ children: list[Any]
+
+ if self.names:
+ if self.iterparse:
+ children = self.iterparse[next(iter(self.iterparse))]
+ else:
+ parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
+ children = parent.findall("*") if parent is not None else []
+
+ if is_list_like(self.names):
+ if len(self.names) < len(children):
+ raise ValueError(
+ "names does not match length of child elements in xpath."
+ )
+ else:
+ raise TypeError(
+ f"{type(self.names).__name__} is not a valid type for names"
+ )
+
+ def _parse_doc(
+ self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
+ ) -> Element:
+ from xml.etree.ElementTree import (
+ XMLParser,
+ parse,
+ )
+
+ handle_data = get_data_from_filepath(
+ filepath_or_buffer=raw_doc,
+ encoding=self.encoding,
+ compression=self.compression,
+ storage_options=self.storage_options,
+ )
+
+ with preprocess_data(handle_data) as xml_data:
+ curr_parser = XMLParser(encoding=self.encoding)
+ document = parse(xml_data, parser=curr_parser)
+
+ return document.getroot()
+
+
+class _LxmlFrameParser(_XMLFrameParser):
+ """
+ Internal class to parse XML into :class:`~pandas.DataFrame` with third-party
+ full-featured XML library, ``lxml``, that supports
+ ``XPath`` 1.0 and XSLT 1.0.
+ """
+
+ def parse_data(self) -> list[dict[str, str | None]]:
+ """
+ Parse xml data.
+
+ This method will call the other internal methods to
+ validate ``xpath``, names, optionally parse and run XSLT,
+ and parse original or transformed XML and return specific nodes.
+ """
+ from lxml.etree import iterparse
+
+ if self.iterparse is None:
+ self.xml_doc = self._parse_doc(self.path_or_buffer)
+
+ if self.stylesheet:
+ self.xsl_doc = self._parse_doc(self.stylesheet)
+ self.xml_doc = self._transform_doc()
+
+ elems = self._validate_path()
+
+ self._validate_names()
+
+ xml_dicts: list[dict[str, str | None]] = (
+ self._parse_nodes(elems)
+ if self.iterparse is None
+ else self._iterparse_nodes(iterparse)
+ )
+
+ return xml_dicts
+
+ def _validate_path(self) -> list[Any]:
+ msg = (
+ "xpath does not return any nodes or attributes. "
+ "Be sure to specify in `xpath` the parent nodes of "
+ "children and attributes to parse. "
+ "If document uses namespaces denoted with "
+ "xmlns, be sure to define namespaces and "
+ "use them in xpath."
+ )
+
+ elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
+ children = [ch for el in elems for ch in el.xpath("*")]
+ attrs = {k: v for el in elems for k, v in el.attrib.items()}
+
+ if elems == []:
+ raise ValueError(msg)
+
+ if elems != []:
+ if self.elems_only and children == []:
+ raise ValueError(msg)
+ if self.attrs_only and attrs == {}:
+ raise ValueError(msg)
+ if children == [] and attrs == {}:
+ raise ValueError(msg)
+
+ return elems
+
+ def _validate_names(self) -> None:
+ children: list[Any]
+
+ if self.names:
+ if self.iterparse:
+ children = self.iterparse[next(iter(self.iterparse))]
+ else:
+ children = self.xml_doc.xpath(
+ self.xpath + "[1]/*", namespaces=self.namespaces
+ )
+
+ if is_list_like(self.names):
+ if len(self.names) < len(children):
+ raise ValueError(
+ "names does not match length of child elements in xpath."
+ )
+ else:
+ raise TypeError(
+ f"{type(self.names).__name__} is not a valid type for names"
+ )
+
+ def _parse_doc(
+ self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
+ ) -> etree._Element:
+ from lxml.etree import (
+ XMLParser,
+ fromstring,
+ parse,
+ )
+
+ handle_data = get_data_from_filepath(
+ filepath_or_buffer=raw_doc,
+ encoding=self.encoding,
+ compression=self.compression,
+ storage_options=self.storage_options,
+ )
+
+ with preprocess_data(handle_data) as xml_data:
+ curr_parser = XMLParser(encoding=self.encoding)
+
+ if isinstance(xml_data, io.StringIO):
+ if self.encoding is None:
+ raise TypeError(
+ "Can not pass encoding None when input is StringIO."
+ )
+
+ document = fromstring(
+ xml_data.getvalue().encode(self.encoding), parser=curr_parser
+ )
+ else:
+ document = parse(xml_data, parser=curr_parser)
+
+ return document
+
+ def _transform_doc(self) -> etree._XSLTResultTree:
+ """
+ Transform original tree using stylesheet.
+
+ This method will transform the original xml using the XSLT script into
+ an ideally flatter xml document for easier parsing and migration
+ to a DataFrame.
+ """
+ from lxml.etree import XSLT
+
+ transformer = XSLT(self.xsl_doc)
+ new_doc = transformer(self.xml_doc)
+
+ return new_doc
+
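+ # A hedged, self-contained sketch of the XSLT step above, assuming lxml is
+ # installed. The stylesheet and document here are made up for illustration;
+ # in read_xml the stylesheet comes from the user via the ``stylesheet`` argument.
+ def _xslt_transform_sketch() -> None:
+     from lxml import etree
+
+     xml_doc = etree.fromstring(b"<data><item><name>x</name></item></data>")
+     xsl_doc = etree.fromstring(
+         b"<xsl:stylesheet version='1.0' "
+         b"xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>"
+         b"<xsl:template match='/'><rows><xsl:for-each select='data/item'>"
+         b"<row><xsl:value-of select='name'/></row>"
+         b"</xsl:for-each></rows></xsl:template></xsl:stylesheet>"
+     )
+     # Compile the stylesheet and apply it to flatten the document.
+     transformer = etree.XSLT(xsl_doc)
+     new_doc = transformer(xml_doc)
+     assert new_doc.getroot().tag == "rows"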
+
+def get_data_from_filepath(
+ filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str],
+ encoding: str | None,
+ compression: CompressionOptions,
+ storage_options: StorageOptions,
+) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]:
+ """
+ Extract raw XML data.
+
+ The method accepts three input types:
+ 1. filepath (string-like)
+ 2. file-like object (e.g. open file object, StringIO)
+ 3. XML string or bytes
+
+ This method turns (1) into (2) to simplify the rest of the processing.
+ It returns input types (2) and (3) unchanged.
+ """
+ if not isinstance(filepath_or_buffer, bytes):
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+
+ if (
+     isinstance(filepath_or_buffer, str)
+     and not filepath_or_buffer.startswith(("<?xml", "<"))
+ ) and (
+     not isinstance(filepath_or_buffer, str)
+     or is_url(filepath_or_buffer)
+     or is_fsspec_url(filepath_or_buffer)
+     or file_exists(filepath_or_buffer)
+ ):
+     with get_handle(
+         filepath_or_buffer,
+         "r",
+         encoding=encoding,
+         compression=compression,
+         storage_options=storage_options,
+     ) as handle_obj:
+         filepath_or_buffer = (
+             handle_obj.handle.read()
+             if hasattr(handle_obj.handle, "read")
+             else handle_obj.handle
+         )
+
+ return filepath_or_buffer
+
+
+ def preprocess_data(data) -> io.StringIO | io.BytesIO:
+ """
+ Convert extracted raw data.
+
+ This method will return underlying data of extracted XML content.
+ The data either has a `read` attribute (e.g. a file object or a
+ StringIO/BytesIO) or is a string or bytes that is an XML document.
+ """
+
+ if isinstance(data, str):
+ data = io.StringIO(data)
+
+ elif isinstance(data, bytes):
+ data = io.BytesIO(data)
+
+ return data
+
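+ # Quick illustration of preprocess_data with arbitrary sample values: a str
+ # becomes StringIO, bytes become BytesIO, and anything already file-like
+ # passes through untouched. Not called anywhere in this module.
+ def _preprocess_data_sketch() -> None:
+     assert isinstance(preprocess_data("<data/>"), io.StringIO)
+     assert isinstance(preprocess_data(b"<data/>"), io.BytesIO)
+     buf = io.BytesIO(b"<data/>")
+     assert preprocess_data(buf) is buf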
+
+def _data_to_frame(data, **kwargs) -> DataFrame:
+ """
+ Convert parsed data to DataFrame.
+
+ This method binds the parsed XML dictionaries of keys and values
+ into named columns of a DataFrame using the built-in TextParser
+ class, which builds the DataFrame and infers specific dtypes.
+ """
+
+ tags = next(iter(data))
+ nodes = [list(d.values()) for d in data]
+
+ try:
+ with TextParser(nodes, names=tags, **kwargs) as tp:
+ return tp.read()
+ except ParserError:
+ raise ParserError(
+ "XML document may be too complex for import. "
+ "Try to flatten document and use distinct "
+ "element and attribute names."
+ )
+
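+ # The reshaping step inside _data_to_frame, shown standalone with made-up
+ # dictionaries: column names come from the first dict's keys, rows from each
+ # dict's values (TextParser then infers dtypes). Not called anywhere.
+ def _data_to_frame_reshape_sketch() -> None:
+     data = [{"shape": "square", "sides": "4"}, {"shape": "circle", "sides": None}]
+     tags = next(iter(data))              # first dict supplies the column names
+     nodes = [list(d.values()) for d in data]
+     assert list(tags) == ["shape", "sides"]
+     assert nodes == [["square", "4"], ["circle", None]]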
+
+def _parse(
+ path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
+ xpath: str,
+ namespaces: dict[str, str] | None,
+ elems_only: bool,
+ attrs_only: bool,
+ names: Sequence[str] | None,
+ dtype: DtypeArg | None,
+ converters: ConvertersArg | None,
+ parse_dates: ParseDatesArg | None,
+ encoding: str | None,
+ parser: XMLParsers,
+ stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,
+ iterparse: dict[str, list[str]] | None,
+ compression: CompressionOptions,
+ storage_options: StorageOptions,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwargs,
+) -> DataFrame:
+ """
+ Call internal parsers.
+
+ This method will conditionally call the internal parsers:
+ _LxmlFrameParser or _EtreeFrameParser.
+
+ Raises
+ ------
+ ImportError
+     * If ``lxml`` is not installed but is selected as the parser.
+
+ ValueError
+ * If parser is not lxml or etree.
+ """
+
+ p: _EtreeFrameParser | _LxmlFrameParser
+
+ if isinstance(path_or_buffer, str) and not any(
+ [
+ is_file_like(path_or_buffer),
+ file_exists(path_or_buffer),
+ is_url(path_or_buffer),
+ is_fsspec_url(path_or_buffer),
+ ]
+ ):
+ warnings.warn(
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if parser == "lxml":
+ lxml = import_optional_dependency("lxml.etree", errors="ignore")
+
+ if lxml is not None:
+ p = _LxmlFrameParser(
+ path_or_buffer,
+ xpath,
+ namespaces,
+ elems_only,
+ attrs_only,
+ names,
+ dtype,
+ converters,
+ parse_dates,
+ encoding,
+ stylesheet,
+ iterparse,
+ compression,
+ storage_options,
+ )
+ else:
+ raise ImportError("lxml not found, please install or use the etree parser.")
+
+ elif parser == "etree":
+ p = _EtreeFrameParser(
+ path_or_buffer,
+ xpath,
+ namespaces,
+ elems_only,
+ attrs_only,
+ names,
+ dtype,
+ converters,
+ parse_dates,
+ encoding,
+ stylesheet,
+ iterparse,
+ compression,
+ storage_options,
+ )
+ else:
+ raise ValueError("Values for parser can only be lxml or etree.")
+
+ data_dicts = p.parse_data()
+
+ return _data_to_frame(
+ data=data_dicts,
+ dtype=dtype,
+ converters=converters,
+ parse_dates=parse_dates,
+ dtype_backend=dtype_backend,
+ **kwargs,
+ )
+
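+ # The parser dispatch above follows the optional-dependency pattern: probe for
+ # lxml with errors="ignore" and raise explicitly if the user asked for it but
+ # it is missing. A condensed sketch of just that branch (illustrative helper,
+ # not used by _parse):
+ def _parser_dispatch_sketch(parser: str) -> str:
+     if parser == "lxml":
+         lxml = import_optional_dependency("lxml.etree", errors="ignore")
+         if lxml is None:
+             raise ImportError("lxml not found, please install or use the etree parser.")
+         return "lxml"
+     if parser == "etree":
+         return "etree"
+     raise ValueError("Values for parser can only be lxml or etree.")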
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",
+)
+def read_xml(
+ path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
+ *,
+ xpath: str = "./*",
+ namespaces: dict[str, str] | None = None,
+ elems_only: bool = False,
+ attrs_only: bool = False,
+ names: Sequence[str] | None = None,
+ dtype: DtypeArg | None = None,
+ converters: ConvertersArg | None = None,
+ parse_dates: ParseDatesArg | None = None,
+ # encoding can not be None for lxml and StringIO input
+ encoding: str | None = "utf-8",
+ parser: XMLParsers = "lxml",
+ stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,
+ iterparse: dict[str, list[str]] | None = None,
+ compression: CompressionOptions = "infer",
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame:
+ r"""
+ Read XML document into a :class:`~pandas.DataFrame` object.
+
+ .. versionadded:: 1.3.0
+
+ Parameters
+ ----------
+ path_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a ``read()`` function. The string can be any valid XML
+ string or a path. The string can further be a URL. Valid URL schemes
+ include http, ftp, s3, and file.
+
+ .. deprecated:: 2.1.0
+ Passing xml literal strings is deprecated.
+ Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.
+
+ xpath : str, optional, default './\*'
+ The ``XPath`` to parse the required set of nodes for migration to a
+ :class:`~pandas.DataFrame`. ``XPath`` should return a collection of elements
+ and not a single element. Note: the ``etree`` parser supports limited ``XPath``
+ expressions. For more complex ``XPath``, use ``lxml``, which requires
+ installation.
+
+ namespaces : dict, optional
+ The namespaces defined in the XML document as a dict with key being
+ the namespace prefix and value the URI. There is no need to include all
+ namespaces in XML, only the ones used in the ``xpath`` expression.
+ Note: if the XML document uses a default namespace denoted as
+ `xmlns='<URI>'` without a prefix, you must assign any temporary
+ namespace prefix such as 'doc' to the URI in order to parse
+ underlying nodes and/or attributes. For example, ::
+
+ namespaces = {{"doc": "https://example.com"}}
+
+ elems_only : bool, optional, default False
+ Parse only the child elements at the specified ``xpath``. By default,
+ all child elements and non-empty text nodes are returned.
+
+ attrs_only : bool, optional, default False
+ Parse only the attributes at the specified ``xpath``.
+ By default, all attributes are returned.
+
+ names : list-like, optional
+ Column names for DataFrame of parsed XML data. Use this parameter to
+ rename original element names and distinguish same named elements and
+ attributes.
+
+ dtype : Type name or dict of column -> type, optional
+ Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
+ 'c': 'Int64'}}
+ Use `str` or `object` together with suitable `na_values` settings
+ to preserve and not interpret dtype.
+ If converters are specified, they will be applied INSTEAD
+ of dtype conversion.
+
+ .. versionadded:: 1.5.0
+
+ converters : dict, optional
+ Dict of functions for converting values in certain columns. Keys can either
+ be integers or column labels.
+
+ .. versionadded:: 1.5.0
+
+ parse_dates : bool or list of int or names or list of lists or dict, default False
+ Identifiers to parse index or columns to datetime. The behavior is as follows:
+
+ * boolean. If True -> try parsing the index.
+ * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+ each as a separate date column.
+ * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+ a single date column.
+ * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+ result 'foo'
+
+ .. versionadded:: 1.5.0
+
+ encoding : str, optional, default 'utf-8'
+ Encoding of XML document.
+
+ parser : {{'lxml','etree'}}, default 'lxml'
+ Parser module to use for retrieval of data. Only 'lxml' and
+ 'etree' are supported. With 'lxml' more complex ``XPath`` searches
+ and ability to use XSLT stylesheet are supported.
+
+ stylesheet : str, path object or file-like object
+ A URL, file-like object, or a raw string containing an XSLT script.
+ This stylesheet should flatten complex, deeply nested XML documents
+ for easier parsing. To use this feature you must have the ``lxml`` module
+ installed and specify 'lxml' as ``parser``. The ``xpath`` must
+ reference nodes of the transformed XML document generated after the XSLT
+ transformation and not the original XML document. Only XSLT 1.0
+ scripts, and not later versions, are currently supported.
+
+ iterparse : dict, optional
+ The nodes or attributes to retrieve in iterparsing of XML document
+ as a dict with key being the name of repeating element and value being
+ list of elements or attribute names that are descendants of the repeated
+ element. Note: If this option is used, it will replace ``xpath`` parsing
+ and, unlike ``xpath``, descendants do not need to relate to each other but can
+ exist anywhere in the document under the repeating element. This
+ memory-efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
+ For example, ::
+
+ iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
+
+ .. versionadded:: 1.5.0
+
+ {decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ {storage_options}
+
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ df
+ A DataFrame.
+
+ See Also
+ --------
+ read_json : Convert a JSON string to pandas object.
+ read_html : Read HTML tables into a list of DataFrame objects.
+
+ Notes
+ -----
+ This method is best designed to import shallow XML documents in the
+ following format, which is the ideal fit for the two dimensions of a
+ ``DataFrame`` (row by column). ::
+
+     <root>
+         <row>
+            <column1>data</column1>
+            <column2>data</column2>
+            <column3>data</column3>
+            ...
+         </row>
+         <row>
+            ...
+         </row>
+         ...
+     </root>
+
+ As a file format, XML documents can be designed in any way, including
+ the layout of elements and attributes, as long as they conform to W3C
+ specifications. Therefore, this method is a convenience handler for
+ a specific flatter design and not all possible XML structures.
+
+ However, for more complex XML documents, ``stylesheet`` allows you to
+ temporarily redesign the original document with XSLT (a special-purpose
+ language) into a flatter version for migration to a DataFrame.
+
+ This function will *always* return a single :class:`DataFrame` or raise
+ exceptions due to issues with the XML document, ``xpath``, or other
+ parameters.
+
+ See the :ref:`read_xml documentation in the IO section of the docs
+ <io.read_xml>` for more information on using this method to parse XML
+ files to DataFrames.
+
+ Examples
+ --------
+ >>> from io import StringIO
+ >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+ ... <data xmlns="http://example.com">
+ ...   <row>
+ ...     <shape>square</shape>
+ ...     <degrees>360</degrees>
+ ...     <sides>4.0</sides>
+ ...   </row>
+ ...   <row>
+ ...     <shape>circle</shape>
+ ...     <degrees>360</degrees>
+ ...     <sides/>
+ ...   </row>
+ ...   <row>
+ ...     <shape>triangle</shape>
+ ...     <degrees>180</degrees>
+ ...     <sides>3.0</sides>
+ ...   </row>
+ ... </data>'''
+
+ >>> df = pd.read_xml(StringIO(xml))
+ >>> df
+ shape degrees sides
+ 0 square 360 4.0
+ 1 circle 360 NaN
+ 2 triangle 180 3.0
+
+ >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+ ... <data>
+ ...   <row shape="square" degrees="360" sides="4.0"/>
+ ...   <row shape="circle" degrees="360"/>
+ ...   <row shape="triangle" degrees="180" sides="3.0"/>
+ ... </data>'''
+
+ >>> df = pd.read_xml(StringIO(xml), xpath=".//row")
+ >>> df
+ shape degrees sides
+ 0 square 360 4.0
+ 1 circle 360 NaN
+ 2 triangle 180 3.0
+
+ >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+ ... <doc:data xmlns:doc="https://example.com">
+ ...   <doc:row>
+ ...     <doc:shape>square</doc:shape>
+ ...     <doc:degrees>360</doc:degrees>
+ ...     <doc:sides>4.0</doc:sides>
+ ...   </doc:row>
+ ...   <doc:row>
+ ...     <doc:shape>circle</doc:shape>
+ ...     <doc:degrees>360</doc:degrees>
+ ...     <doc:sides/>
+ ...   </doc:row>
+ ...   <doc:row>
+ ...     <doc:shape>triangle</doc:shape>
+ ...     <doc:degrees>180</doc:degrees>
+ ...     <doc:sides>3.0</doc:sides>
+ ...   </doc:row>
+ ... </doc:data>'''
+
+ >>> df = pd.read_xml(StringIO(xml),
+ ... xpath="//doc:row",
+ ... namespaces={{"doc": "https://example.com"}})
+ >>> df
+ shape degrees sides
+ 0 square 360 4.0
+ 1 circle 360 NaN
+ 2 triangle 180 3.0
+
+ >>> xml_data = '''
+ ...         <data>
+ ...           <row>
+ ...             <index>0</index>
+ ...             <a>1</a>
+ ...             <b>2.5</b>
+ ...             <c>True</c>
+ ...             <d>a</d>
+ ...             <e>2019-12-31 00:00:00</e>
+ ...           </row>
+ ...           <row>
+ ...             <index>1</index>
+ ...             <b>4.5</b>
+ ...             <c>False</c>
+ ...             <d>b</d>
+ ...             <e>2019-12-31 00:00:00</e>
+ ...           </row>
+ ...         </data>
+ ...         '''
+
+ >>> df = pd.read_xml(StringIO(xml_data),
+ ... dtype_backend="numpy_nullable",
+ ... parse_dates=["e"])
+ >>> df
+    index     a    b      c  d          e
+ 0      0     1  2.5   True  a 2019-12-31
+ 1      1  <NA>  4.5  False  b 2019-12-31
+ """
+ check_dtype_backend(dtype_backend)
+
+ return _parse(
+ path_or_buffer=path_or_buffer,
+ xpath=xpath,
+ namespaces=namespaces,
+ elems_only=elems_only,
+ attrs_only=attrs_only,
+ names=names,
+ dtype=dtype,
+ converters=converters,
+ parse_dates=parse_dates,
+ encoding=encoding,
+ parser=parser,
+ stylesheet=stylesheet,
+ iterparse=iterparse,
+ compression=compression,
+ storage_options=storage_options,
+ dtype_backend=dtype_backend,
+ )
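+
+
+ # A hedged usage sketch of read_xml. The small inline document is made up, and
+ # the iterparse call is shown but not executed because "very_large.xml" is a
+ # hypothetical path. Not called anywhere in this module.
+ def _read_xml_usage_sketch() -> None:
+     from io import StringIO
+
+     # Small document: the default xpath "./*" selects the <row> elements.
+     df = read_xml(StringIO("<data><row><a>1</a><b>2</b></row></data>"), parser="etree")
+     assert list(df.columns) == ["a", "b"]
+     # Very large file: skip xpath and stream with iterparse instead, naming
+     # the repeating element and the descendants to collect:
+     # read_xml("very_large.xml", iterparse={"row": ["a", "b"]}, parser="lxml")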
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png
new file mode 100644
index 0000000000000000000000000000000000000000..2cb593b8e1cf68e429cc8402838c31f70be59afc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b20b56fadc7471c0694d3e8148d9e28a83d7967bac16bf8852094afea3950414
+size 2114