diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b01bc84941b7c2c5902b82bdcf3360232813d07f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f724a71535d22afda093830b2c02b1327b44fe1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..706bc3f06f72e5a0fc5d7b1efb0ffac2e533afbb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6bb84b831d0cc422d7139d76a718c50f94c9d194
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..627c999b7825b8aaea998a78c029ce610c5b4585
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c804b81c49e7c8abb406f2132909df6036df1c09
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/__init__.py
@@ -0,0 +1,13 @@
+# ruff: noqa: TCH004
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ # import modules that have public classes/functions
+ from pandas.io import (
+ formats,
+ json,
+ stata,
+ )
+
+ # mark only those modules as public
+ __all__ = ["formats", "json", "stata"]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5c38ee7949de9ef4469c8a62f217be66e7e9966
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22745e855e2d1d2d87d1844a4fbdaf2a69b0a33d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..809ebb34bd76a5a853bbb05faded771a642b6caf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a91de9e118a7650c97da4bb7d2ce9db775dfdf04
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6038cc4593b45b3c03d8b567a44b4d36f90a20c1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e95c6d00aca3749c9234607ade47b910578336e2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83fcb3b0b8f013bc2046980a737f21239b60633c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2dcda42d005681843480e2e673ba898ee88a8724
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..029dffe0248fb16f461d8326e106272c05d42813
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05bd04fe4e7744c76a52cfa57d7dfc593bd670a7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..897d4c68950e4851df569e316f59db6d58a9fe96
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4246e1c3ee2aff4a9ffd2a83e9e91e11cf8bd8af
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..946c13069f10d82e3bee5735c81ce11819d9179d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46af36b4548c26e81539862d51d79b679d6e520a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83290f2591f77fa56fa9e9fe301f637131bb7cc9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f90fada5214a72fa6848f71b2b1990ddf8466b95
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/api.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e8b34a61dfc62992a37d9fab3263ee00a28d1fc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/api.py
@@ -0,0 +1,65 @@
+"""
+Data IO api
+"""
+
+from pandas.io.clipboards import read_clipboard
+from pandas.io.excel import (
+ ExcelFile,
+ ExcelWriter,
+ read_excel,
+)
+from pandas.io.feather_format import read_feather
+from pandas.io.gbq import read_gbq
+from pandas.io.html import read_html
+from pandas.io.json import read_json
+from pandas.io.orc import read_orc
+from pandas.io.parquet import read_parquet
+from pandas.io.parsers import (
+ read_csv,
+ read_fwf,
+ read_table,
+)
+from pandas.io.pickle import (
+ read_pickle,
+ to_pickle,
+)
+from pandas.io.pytables import (
+ HDFStore,
+ read_hdf,
+)
+from pandas.io.sas import read_sas
+from pandas.io.spss import read_spss
+from pandas.io.sql import (
+ read_sql,
+ read_sql_query,
+ read_sql_table,
+)
+from pandas.io.stata import read_stata
+from pandas.io.xml import read_xml
+
+__all__ = [
+ "ExcelFile",
+ "ExcelWriter",
+ "HDFStore",
+ "read_clipboard",
+ "read_csv",
+ "read_excel",
+ "read_feather",
+ "read_fwf",
+ "read_gbq",
+ "read_hdf",
+ "read_html",
+ "read_json",
+ "read_orc",
+ "read_parquet",
+ "read_pickle",
+ "read_sas",
+ "read_spss",
+ "read_sql",
+ "read_sql_query",
+ "read_sql_table",
+ "read_stata",
+ "read_table",
+ "read_xml",
+ "to_pickle",
+]
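api.py only re-exports the reader/writer entry points named in __all__, so they can be used interchangeably with the top-level pandas attributes. A short usage sketch of a few of them (the pickle file name is illustrative, not part of this diff):

    import io

    from pandas.io.api import read_csv, read_pickle, to_pickle

    df = read_csv(io.StringIO("a,b\n1,2\n3,4"))   # same callable as pd.read_csv
    to_pickle(df, "roundtrip.pkl")                # module-level writer re-exported above
    assert read_pickle("roundtrip.pkl").equals(df)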
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/common.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..72c9deeb54fc7aaab781b2870171cf983a47da1f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/common.py
@@ -0,0 +1,1267 @@
+"""Common IO api utilities"""
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+import codecs
+from collections import defaultdict
+from collections.abc import (
+ Hashable,
+ Mapping,
+ Sequence,
+)
+import dataclasses
+import functools
+import gzip
+from io import (
+ BufferedIOBase,
+ BytesIO,
+ RawIOBase,
+ StringIO,
+ TextIOBase,
+ TextIOWrapper,
+)
+import mmap
+import os
+from pathlib import Path
+import re
+import tarfile
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ AnyStr,
+ DefaultDict,
+ Generic,
+ Literal,
+ TypeVar,
+ cast,
+ overload,
+)
+from urllib.parse import (
+ urljoin,
+ urlparse as parse_url,
+ uses_netloc,
+ uses_params,
+ uses_relative,
+)
+import warnings
+import zipfile
+
+from pandas._typing import (
+ BaseBuffer,
+ ReadCsvBuffer,
+)
+from pandas.compat import (
+ get_bz2_file,
+ get_lzma_file,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_file_like,
+ is_integer,
+ is_list_like,
+)
+from pandas.core.dtypes.generic import ABCMultiIndex
+
+from pandas.core.shared_docs import _shared_docs
+
+_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
+_VALID_URLS.discard("")
+_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
+
+BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
+
+
+if TYPE_CHECKING:
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionDict,
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas import MultiIndex
+
+
+@dataclasses.dataclass
+class IOArgs:
+ """
+ Return value of io/common.py:_get_filepath_or_buffer.
+ """
+
+ filepath_or_buffer: str | BaseBuffer
+ encoding: str
+ mode: str
+ compression: CompressionDict
+ should_close: bool = False
+
+
+@dataclasses.dataclass
+class IOHandles(Generic[AnyStr]):
+ """
+ Return value of io/common.py:get_handle
+
+ Can be used as a context manager.
+
+ This is used to easily close created buffers and to handle corner cases when
+ TextIOWrapper is inserted.
+
+ handle: The file handle to be used.
+ created_handles: All file handles that are created by get_handle
+ is_wrapped: Whether a TextIOWrapper needs to be detached.
+ """
+
+ # handle might not implement the IO-interface
+ handle: IO[AnyStr]
+ compression: CompressionDict
+ created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
+ is_wrapped: bool = False
+
+ def close(self) -> None:
+ """
+ Close all created buffers.
+
+ Note: If a TextIOWrapper was inserted, it is flushed and detached to
+ avoid closing the potentially user-created buffer.
+ """
+ if self.is_wrapped:
+ assert isinstance(self.handle, TextIOWrapper)
+ self.handle.flush()
+ self.handle.detach()
+ self.created_handles.remove(self.handle)
+ for handle in self.created_handles:
+ handle.close()
+ self.created_handles = []
+ self.is_wrapped = False
+
+ def __enter__(self) -> IOHandles[AnyStr]:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+
+def is_url(url: object) -> bool:
+ """
+ Check to see if a URL has a valid protocol.
+
+ Parameters
+ ----------
+ url : str or unicode
+
+ Returns
+ -------
+ isurl : bool
+ If `url` has a valid protocol return True otherwise False.
+ """
+ if not isinstance(url, str):
+ return False
+ return parse_url(url).scheme in _VALID_URLS
+
+
+@overload
+def _expand_user(filepath_or_buffer: str) -> str:
+ ...
+
+
+@overload
+def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
+ ...
+
+
+def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
+ """
+ Return the argument with an initial component of ~ or ~user
+ replaced by that user's home directory.
+
+ Parameters
+ ----------
+ filepath_or_buffer : object to be converted if possible
+
+ Returns
+ -------
+ expanded_filepath_or_buffer : an expanded filepath or the
+ input if not expandable
+ """
+ if isinstance(filepath_or_buffer, str):
+ return os.path.expanduser(filepath_or_buffer)
+ return filepath_or_buffer
+
+
+def validate_header_arg(header: object) -> None:
+ if header is None:
+ return
+ if is_integer(header):
+ header = cast(int, header)
+ if header < 0:
+ # GH 27779
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
+ return
+ if is_list_like(header, allow_sets=False):
+ header = cast(Sequence, header)
+ if not all(map(is_integer, header)):
+ raise ValueError("header must be integer or list of integers")
+ if any(i < 0 for i in header):
+ raise ValueError("cannot specify multi-index header with negative integers")
+ return
+ if is_bool(header):
+ raise TypeError(
+ "Passing a bool to header is invalid. Use header=None for no header or "
+ "header=int or list-like of ints to specify "
+ "the row(s) making up the column names"
+ )
+ # GH 16338
+ raise ValueError("header must be integer or list of integers")
+
+
+@overload
+def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
+ ...
+
+
+@overload
+def stringify_path(
+ filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
+) -> BaseBufferT:
+ ...
+
+
+def stringify_path(
+ filepath_or_buffer: FilePath | BaseBufferT,
+ convert_file_like: bool = False,
+) -> str | BaseBufferT:
+ """
+ Attempt to convert a path-like object to a string.
+
+ Parameters
+ ----------
+ filepath_or_buffer : object to be converted
+
+ Returns
+ -------
+ str_filepath_or_buffer : maybe a string version of the object
+
+ Notes
+ -----
+ Objects supporting the fspath protocol are coerced
+ according to its __fspath__ method.
+
+ Any other object is passed through unchanged, which includes bytes,
+ strings, buffers, or anything else that's not even path-like.
+ """
+ if not convert_file_like and is_file_like(filepath_or_buffer):
+ # GH 38125: some fsspec objects implement os.PathLike but have already opened a
+ # file. This prevents opening the file a second time. infer_compression calls
+ # this function with convert_file_like=True to infer the compression.
+ return cast(BaseBufferT, filepath_or_buffer)
+
+ if isinstance(filepath_or_buffer, os.PathLike):
+ filepath_or_buffer = filepath_or_buffer.__fspath__()
+ return _expand_user(filepath_or_buffer)
+
+
+def urlopen(*args, **kwargs):
+ """
+ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
+ the stdlib.
+ """
+ import urllib.request
+
+ return urllib.request.urlopen(*args, **kwargs)
+
+
+def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
+ """
+ Returns true if the given URL looks like
+ something fsspec can handle
+ """
+ return (
+ isinstance(url, str)
+ and bool(_RFC_3986_PATTERN.match(url))
+ and not url.startswith(("http://", "https://"))
+ )
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
+)
+def _get_filepath_or_buffer(
+ filepath_or_buffer: FilePath | BaseBuffer,
+ encoding: str = "utf-8",
+ compression: CompressionOptions | None = None,
+ mode: str = "r",
+ storage_options: StorageOptions | None = None,
+) -> IOArgs:
+ """
+ If the filepath_or_buffer is a url, translate and return the buffer.
+ Otherwise passthrough.
+
+ Parameters
+ ----------
+ filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
+ or buffer
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ encoding : the encoding to use to decode bytes, default is 'utf-8'
+ mode : str, optional
+
+ {storage_options}
+
+
+ Returns the dataclass IOArgs.
+ """
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+
+ # handle compression dict
+ compression_method, compression = get_compression_method(compression)
+ compression_method = infer_compression(filepath_or_buffer, compression_method)
+
+ # GH21227 internal compression is not used for non-binary handles.
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
+ warnings.warn(
+ "compression has no effect when passing a non-binary object as input.",
+ RuntimeWarning,
+ stacklevel=find_stack_level(),
+ )
+ compression_method = None
+
+ compression = dict(compression, method=compression_method)
+
+ # bz2 and xz do not write the byte order mark for utf-16 and utf-32
+ # print a warning when writing such files
+ if (
+ "w" in mode
+ and compression_method in ["bz2", "xz"]
+ and encoding in ["utf-16", "utf-32"]
+ ):
+ warnings.warn(
+ f"{compression} will not write the byte order mark for {encoding}",
+ UnicodeWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ # Use binary mode when converting path-like objects to file-like objects (fsspec)
+ # except when text mode is explicitly requested. The original mode is returned if
+ # fsspec is not used.
+ fsspec_mode = mode
+ if "t" not in fsspec_mode and "b" not in fsspec_mode:
+ fsspec_mode += "b"
+
+ if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
+ # TODO: fsspec can also handle HTTP via requests, but leaving this
+ # unchanged. using fsspec appears to break the ability to infer if the
+ # server responded with gzipped data
+ storage_options = storage_options or {}
+
+ # waiting until now for importing to match intended lazy logic of
+ # urlopen function defined elsewhere in this module
+ import urllib.request
+
+ # assuming storage_options is to be interpreted as headers
+ req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
+ with urlopen(req_info) as req:
+ content_encoding = req.headers.get("Content-Encoding", None)
+ if content_encoding == "gzip":
+ # Override compression based on Content-Encoding header
+ compression = {"method": "gzip"}
+ reader = BytesIO(req.read())
+ return IOArgs(
+ filepath_or_buffer=reader,
+ encoding=encoding,
+ compression=compression,
+ should_close=True,
+ mode=fsspec_mode,
+ )
+
+ if is_fsspec_url(filepath_or_buffer):
+ assert isinstance(
+ filepath_or_buffer, str
+ ) # just to appease mypy for this branch
+ # two special-case s3-like protocols; these have special meaning in Hadoop,
+ # but are equivalent to just "s3" from fsspec's point of view
+ # cc #11071
+ if filepath_or_buffer.startswith("s3a://"):
+ filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
+ if filepath_or_buffer.startswith("s3n://"):
+ filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
+ fsspec = import_optional_dependency("fsspec")
+
+ # If botocore is installed we fallback to reading with anon=True
+ # to allow reads from public buckets
+ err_types_to_retry_with_anon: list[Any] = []
+ try:
+ import_optional_dependency("botocore")
+ from botocore.exceptions import (
+ ClientError,
+ NoCredentialsError,
+ )
+
+ err_types_to_retry_with_anon = [
+ ClientError,
+ NoCredentialsError,
+ PermissionError,
+ ]
+ except ImportError:
+ pass
+
+ try:
+ file_obj = fsspec.open(
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
+ ).open()
+ # GH 34626 Reads from Public Buckets without Credentials needs anon=True
+ except tuple(err_types_to_retry_with_anon):
+ if storage_options is None:
+ storage_options = {"anon": True}
+ else:
+ # don't mutate user input.
+ storage_options = dict(storage_options)
+ storage_options["anon"] = True
+ file_obj = fsspec.open(
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
+ ).open()
+
+ return IOArgs(
+ filepath_or_buffer=file_obj,
+ encoding=encoding,
+ compression=compression,
+ should_close=True,
+ mode=fsspec_mode,
+ )
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
+
+ if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
+ return IOArgs(
+ filepath_or_buffer=_expand_user(filepath_or_buffer),
+ encoding=encoding,
+ compression=compression,
+ should_close=False,
+ mode=mode,
+ )
+
+ # is_file_like requires (read | write) & __iter__ but __iter__ is only
+ # needed for read_csv(engine=python)
+ if not (
+ hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
+ ):
+ msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
+ raise ValueError(msg)
+
+ return IOArgs(
+ filepath_or_buffer=filepath_or_buffer,
+ encoding=encoding,
+ compression=compression,
+ should_close=False,
+ mode=mode,
+ )
+
+
+def file_path_to_url(path: str) -> str:
+ """
+ converts an absolute native path to a FILE URL.
+
+ Parameters
+ ----------
+ path : a path in native format
+
+ Returns
+ -------
+ a valid FILE URL
+ """
+ # lazify expensive import (~30ms)
+ from urllib.request import pathname2url
+
+ return urljoin("file:", pathname2url(path))
+
+
+extension_to_compression = {
+ ".tar": "tar",
+ ".tar.gz": "tar",
+ ".tar.bz2": "tar",
+ ".tar.xz": "tar",
+ ".gz": "gzip",
+ ".bz2": "bz2",
+ ".zip": "zip",
+ ".xz": "xz",
+ ".zst": "zstd",
+}
+_supported_compressions = set(extension_to_compression.values())
+
+
+def get_compression_method(
+ compression: CompressionOptions,
+) -> tuple[str | None, CompressionDict]:
+ """
+ Simplifies a compression argument to a compression method string and
+ a mapping containing additional arguments.
+
+ Parameters
+ ----------
+ compression : str or mapping
+ If string, specifies the compression method. If mapping, value at key
+ 'method' specifies compression method.
+
+ Returns
+ -------
+    tuple of (compression method: Optional[str],
+              compression arguments: Dict[str, Any])
+
+ Raises
+ ------
+ ValueError on mapping missing 'method' key
+ """
+ compression_method: str | None
+ if isinstance(compression, Mapping):
+ compression_args = dict(compression)
+ try:
+ compression_method = compression_args.pop("method")
+ except KeyError as err:
+ raise ValueError("If mapping, compression must have key 'method'") from err
+ else:
+ compression_args = {}
+ compression_method = compression
+ return compression_method, compression_args
+
+
+@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
+def infer_compression(
+ filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
+) -> str | None:
+ """
+ Get the compression method for filepath_or_buffer. If compression='infer',
+ the inferred compression method is returned. Otherwise, the input
+ compression method is returned unchanged, unless it's invalid, in which
+ case an error is raised.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str or file handle
+ File path or object.
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ Returns
+ -------
+ string or None
+
+ Raises
+ ------
+ ValueError on invalid compression specified.
+ """
+ if compression is None:
+ return None
+
+ # Infer compression
+ if compression == "infer":
+ # Convert all path types (e.g. pathlib.Path) to strings
+ filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
+ if not isinstance(filepath_or_buffer, str):
+ # Cannot infer compression of a buffer, assume no compression
+ return None
+
+ # Infer compression from the filename/URL extension
+ for extension, compression in extension_to_compression.items():
+ if filepath_or_buffer.lower().endswith(extension):
+ return compression
+ return None
+
+ # Compression has been specified. Check that it's valid
+ if compression in _supported_compressions:
+ return compression
+
+ valid = ["infer", None] + sorted(_supported_compressions)
+ msg = (
+ f"Unrecognized compression type: {compression}\n"
+ f"Valid compression types are {valid}"
+ )
+ raise ValueError(msg)
+
+
+def check_parent_directory(path: Path | str) -> None:
+ """
+ Check if parent directory of a file exists, raise OSError if it does not
+
+ Parameters
+ ----------
+ path: Path or str
+ Path to check parent directory of
+ """
+ parent = Path(path).parent
+ if not parent.is_dir():
+ raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[False],
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[bytes]:
+ ...
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[True] = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str]:
+ ...
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: bool = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str] | IOHandles[bytes]:
+ ...
+
+
+@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = None,
+ compression: CompressionOptions | None = None,
+ memory_map: bool = False,
+ is_text: bool = True,
+ errors: str | None = None,
+ storage_options: StorageOptions | None = None,
+) -> IOHandles[str] | IOHandles[bytes]:
+ """
+ Get file handle for given path/buffer and mode.
+
+ Parameters
+ ----------
+ path_or_buf : str or file handle
+ File path or object.
+ mode : str
+ Mode to open path_or_buf with.
+ encoding : str or None
+ Encoding to use.
+ {compression_options}
+
+ May be a dict with key 'method' as compression mode
+ and other keys as compression options if compression
+ mode is 'zip'.
+
+ Passing compression options as keys in dict is
+ supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ memory_map : bool, default False
+ See parsers._parser_params for more information. Only used by read_csv.
+ is_text : bool, default True
+ Whether the type of the content passed to the file/buffer is string or
+ bytes. This is not the same as `"b" not in mode`. If a string content is
+ passed to a binary file/buffer, a wrapper is inserted.
+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled.
+ See the errors argument for :func:`open` for a full list
+ of options.
+ storage_options: StorageOptions = None
+ Passed to _get_filepath_or_buffer
+
+ Returns the dataclass IOHandles
+ """
+ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior
+ encoding = encoding or "utf-8"
+
+ errors = errors or "strict"
+
+ # read_csv does not know whether the buffer is opened in binary/text mode
+ if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
+ mode += "b"
+
+ # validate encoding and errors
+ codecs.lookup(encoding)
+ if isinstance(errors, str):
+ codecs.lookup_error(errors)
+
+ # open URLs
+ ioargs = _get_filepath_or_buffer(
+ path_or_buf,
+ encoding=encoding,
+ compression=compression,
+ mode=mode,
+ storage_options=storage_options,
+ )
+
+ handle = ioargs.filepath_or_buffer
+ handles: list[BaseBuffer]
+
+ # memory mapping needs to be the first step
+ # only used for read_csv
+ handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
+
+ is_path = isinstance(handle, str)
+ compression_args = dict(ioargs.compression)
+ compression = compression_args.pop("method")
+
+ # Only for write methods
+ if "r" not in mode and is_path:
+ check_parent_directory(str(handle))
+
+ if compression:
+ if compression != "zstd":
+ # compression libraries do not like an explicit text-mode
+ ioargs.mode = ioargs.mode.replace("t", "")
+ elif compression == "zstd" and "b" not in ioargs.mode:
+ # python-zstandard defaults to text mode, but we always expect
+ # compression libraries to use binary mode.
+ ioargs.mode += "b"
+
+ # GZ Compression
+ if compression == "gzip":
+ if isinstance(handle, str):
+ # error: Incompatible types in assignment (expression has type
+ # "GzipFile", variable has type "Union[str, BaseBuffer]")
+ handle = gzip.GzipFile( # type: ignore[assignment]
+ filename=handle,
+ mode=ioargs.mode,
+ **compression_args,
+ )
+ else:
+ handle = gzip.GzipFile(
+ # No overload variant of "GzipFile" matches argument types
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
+ fileobj=handle, # type: ignore[call-overload]
+ mode=ioargs.mode,
+ **compression_args,
+ )
+
+ # BZ Compression
+ elif compression == "bz2":
+ # Overload of "BZ2File" to handle pickle protocol 5
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
+ handle = get_bz2_file()( # type: ignore[call-overload]
+ handle,
+ mode=ioargs.mode,
+ **compression_args,
+ )
+
+ # ZIP Compression
+ elif compression == "zip":
+ # error: Argument 1 to "_BytesZipFile" has incompatible type
+ # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
+ # ReadBuffer[bytes], WriteBuffer[bytes]]"
+ handle = _BytesZipFile(
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
+ )
+ if handle.buffer.mode == "r":
+ handles.append(handle)
+ zip_names = handle.buffer.namelist()
+ if len(zip_names) == 1:
+ handle = handle.buffer.open(zip_names.pop())
+ elif not zip_names:
+ raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
+ else:
+ raise ValueError(
+ "Multiple files found in ZIP file. "
+ f"Only one file per ZIP: {zip_names}"
+ )
+
+ # TAR Encoding
+ elif compression == "tar":
+ compression_args.setdefault("mode", ioargs.mode)
+ if isinstance(handle, str):
+ handle = _BytesTarFile(name=handle, **compression_args)
+ else:
+ # error: Argument "fileobj" to "_BytesTarFile" has incompatible
+ # type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
+ # WriteBuffer[bytes], None]"
+ handle = _BytesTarFile(
+ fileobj=handle, **compression_args # type: ignore[arg-type]
+ )
+ assert isinstance(handle, _BytesTarFile)
+ if "r" in handle.buffer.mode:
+ handles.append(handle)
+ files = handle.buffer.getnames()
+ if len(files) == 1:
+ file = handle.buffer.extractfile(files[0])
+ assert file is not None
+ handle = file
+ elif not files:
+ raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
+ else:
+ raise ValueError(
+ "Multiple files found in TAR archive. "
+ f"Only one file per TAR archive: {files}"
+ )
+
+ # XZ Compression
+ elif compression == "xz":
+ # error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
+ # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
+ # PathLike[bytes]], IO[bytes]], None]"
+ handle = get_lzma_file()(
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
+ )
+
+ # Zstd Compression
+ elif compression == "zstd":
+ zstd = import_optional_dependency("zstandard")
+ if "r" in ioargs.mode:
+ open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
+ else:
+ open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
+ handle = zstd.open(
+ handle,
+ mode=ioargs.mode,
+ **open_args,
+ )
+
+ # Unrecognized Compression
+ else:
+ msg = f"Unrecognized compression type: {compression}"
+ raise ValueError(msg)
+
+ assert not isinstance(handle, str)
+ handles.append(handle)
+
+ elif isinstance(handle, str):
+ # Check whether the filename is to be opened in binary mode.
+ # Binary mode does not support 'encoding' and 'newline'.
+ if ioargs.encoding and "b" not in ioargs.mode:
+ # Encoding
+ handle = open(
+ handle,
+ ioargs.mode,
+ encoding=ioargs.encoding,
+ errors=errors,
+ newline="",
+ )
+ else:
+ # Binary mode
+ handle = open(handle, ioargs.mode)
+ handles.append(handle)
+
+ # Convert BytesIO or file objects passed with an encoding
+ is_wrapped = False
+ if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
+ # not added to handles as it does not open/buffer resources
+ handle = _BytesIOWrapper(
+ handle,
+ encoding=ioargs.encoding,
+ )
+ elif is_text and (
+ compression or memory_map or _is_binary_mode(handle, ioargs.mode)
+ ):
+ if (
+ not hasattr(handle, "readable")
+ or not hasattr(handle, "writable")
+ or not hasattr(handle, "seekable")
+ ):
+ handle = _IOWrapper(handle)
+ # error: Argument 1 to "TextIOWrapper" has incompatible type
+ # "_IOWrapper"; expected "IO[bytes]"
+ handle = TextIOWrapper(
+ handle, # type: ignore[arg-type]
+ encoding=ioargs.encoding,
+ errors=errors,
+ newline="",
+ )
+ handles.append(handle)
+ # only marked as wrapped when the caller provided a handle
+ is_wrapped = not (
+ isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
+ )
+
+ if "r" in ioargs.mode and not hasattr(handle, "read"):
+ raise TypeError(
+ "Expected file path name or file-like object, "
+ f"got {type(ioargs.filepath_or_buffer)} type"
+ )
+
+ handles.reverse() # close the most recently added buffer first
+ if ioargs.should_close:
+ assert not isinstance(ioargs.filepath_or_buffer, str)
+ handles.append(ioargs.filepath_or_buffer)
+
+ return IOHandles(
+ # error: Argument "handle" to "IOHandles" has incompatible type
+ # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
+ # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
+ handle=handle, # type: ignore[arg-type]
+ # error: Argument "created_handles" to "IOHandles" has incompatible type
+ # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
+ created_handles=handles, # type: ignore[arg-type]
+ is_wrapped=is_wrapped,
+ compression=ioargs.compression,
+ )
+
+
+# error: Definition of "__enter__" in base class "IOBase" is incompatible
+# with definition in base class "BinaryIO"
+class _BufferedWriter(BytesIO, ABC): # type: ignore[misc]
+ """
+ Some objects do not support multiple .write() calls (TarFile and ZipFile).
+ This wrapper writes to the underlying buffer on close.
+ """
+
+ buffer = BytesIO()
+
+ @abstractmethod
+ def write_to_buffer(self) -> None:
+ ...
+
+ def close(self) -> None:
+ if self.closed:
+ # already closed
+ return
+ if self.getbuffer().nbytes:
+ # write to buffer
+ self.seek(0)
+ with self.buffer:
+ self.write_to_buffer()
+ else:
+ self.buffer.close()
+ super().close()
+
+
+class _BytesTarFile(_BufferedWriter):
+ def __init__(
+ self,
+ name: str | None = None,
+ mode: Literal["r", "a", "w", "x"] = "r",
+ fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
+ archive_name: str | None = None,
+ **kwargs,
+ ) -> None:
+ super().__init__()
+ self.archive_name = archive_name
+ self.name = name
+ # error: Incompatible types in assignment (expression has type "TarFile",
+ # base class "_BufferedWriter" defined the type as "BytesIO")
+ self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment]
+ name=name,
+ mode=self.extend_mode(mode),
+ fileobj=fileobj,
+ **kwargs,
+ )
+
+ def extend_mode(self, mode: str) -> str:
+ mode = mode.replace("b", "")
+ if mode != "w":
+ return mode
+ if self.name is not None:
+ suffix = Path(self.name).suffix
+ if suffix in (".gz", ".xz", ".bz2"):
+ mode = f"{mode}:{suffix[1:]}"
+ return mode
+
+ def infer_filename(self) -> str | None:
+ """
+        If an explicit archive_name is not given, we still want the file inside the tar
+        archive not to be named something.tar, because that causes confusion (GH39465).
+ """
+ if self.name is None:
+ return None
+
+ filename = Path(self.name)
+ if filename.suffix == ".tar":
+ return filename.with_suffix("").name
+ elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"):
+ return filename.with_suffix("").with_suffix("").name
+ return filename.name
+
+ def write_to_buffer(self) -> None:
+ # TarFile needs a non-empty string
+ archive_name = self.archive_name or self.infer_filename() or "tar"
+ tarinfo = tarfile.TarInfo(name=archive_name)
+ tarinfo.size = len(self.getvalue())
+ self.buffer.addfile(tarinfo, self)
+
+
+class _BytesZipFile(_BufferedWriter):
+ def __init__(
+ self,
+ file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
+ mode: str,
+ archive_name: str | None = None,
+ **kwargs,
+ ) -> None:
+ super().__init__()
+ mode = mode.replace("b", "")
+ self.archive_name = archive_name
+
+ kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
+ # error: Incompatible types in assignment (expression has type "ZipFile",
+ # base class "_BufferedWriter" defined the type as "BytesIO")
+ self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: ignore[assignment]
+ file, mode, **kwargs
+ )
+
+ def infer_filename(self) -> str | None:
+ """
+ If an explicit archive_name is not given, we still want the file inside the zip
+ file not to be named something.zip, because that causes confusion (GH39465).
+ """
+ if isinstance(self.buffer.filename, (os.PathLike, str)):
+ filename = Path(self.buffer.filename)
+ if filename.suffix == ".zip":
+ return filename.with_suffix("").name
+ return filename.name
+ return None
+
+ def write_to_buffer(self) -> None:
+ # ZipFile needs a non-empty string
+ archive_name = self.archive_name or self.infer_filename() or "zip"
+ self.buffer.writestr(archive_name, self.getvalue())
+
+
+class _IOWrapper:
+    # TextIOWrapper is overly strict: it requests that the buffer has seekable,
+    # readable, and writable methods. If we have a read-only buffer, we shouldn't
+    # need writable and vice versa. Some buffers are seek/read/writ-able but do not
+    # have the "-able" methods, e.g., tempfile.SpooledTemporaryFile.
+    # If a buffer does not have the above "-able" methods, we simply assume they are
+    # seek/read/writ-able.
+ def __init__(self, buffer: BaseBuffer) -> None:
+ self.buffer = buffer
+
+ def __getattr__(self, name: str):
+ return getattr(self.buffer, name)
+
+ def readable(self) -> bool:
+ if hasattr(self.buffer, "readable"):
+ return self.buffer.readable()
+ return True
+
+ def seekable(self) -> bool:
+ if hasattr(self.buffer, "seekable"):
+ return self.buffer.seekable()
+ return True
+
+ def writable(self) -> bool:
+ if hasattr(self.buffer, "writable"):
+ return self.buffer.writable()
+ return True
+
+
+class _BytesIOWrapper:
+ # Wrapper that wraps a StringIO buffer and reads bytes from it
+ # Created for compat with pyarrow read_csv
+ def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
+ self.buffer = buffer
+ self.encoding = encoding
+ # Because a character can be represented by more than 1 byte,
+ # it is possible that reading will produce more bytes than n
+ # We store the extra bytes in this overflow variable, and append the
+ # overflow to the front of the bytestring the next time reading is performed
+ self.overflow = b""
+
+ def __getattr__(self, attr: str):
+ return getattr(self.buffer, attr)
+
+ def read(self, n: int | None = -1) -> bytes:
+ assert self.buffer is not None
+ bytestring = self.buffer.read(n).encode(self.encoding)
+ # When n=-1/n greater than remaining bytes: Read entire file/rest of file
+ combined_bytestring = self.overflow + bytestring
+ if n is None or n < 0 or n >= len(combined_bytestring):
+ self.overflow = b""
+ return combined_bytestring
+ else:
+ to_return = combined_bytestring[:n]
+ self.overflow = combined_bytestring[n:]
+ return to_return
+
+
+def _maybe_memory_map(
+ handle: str | BaseBuffer, memory_map: bool
+) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
+ """Try to memory map file/buffer."""
+ handles: list[BaseBuffer] = []
+ memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
+ if not memory_map:
+ return handle, memory_map, handles
+
+ # mmap used by only read_csv
+ handle = cast(ReadCsvBuffer, handle)
+
+ # need to open the file first
+ if isinstance(handle, str):
+ handle = open(handle, "rb")
+ handles.append(handle)
+
+ try:
+ # open mmap and adds *-able
+ # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
+ # expected "BaseBuffer"
+ wrapped = _IOWrapper(
+ mmap.mmap(
+ handle.fileno(), 0, access=mmap.ACCESS_READ # type: ignore[arg-type]
+ )
+ )
+ finally:
+ for handle in reversed(handles):
+ # error: "BaseBuffer" has no attribute "close"
+ handle.close() # type: ignore[attr-defined]
+
+ return wrapped, memory_map, [wrapped]
+
+
+def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
+ """Test whether file exists."""
+ exists = False
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ return exists
+ try:
+ exists = os.path.exists(filepath_or_buffer)
+ # gh-5874: if the filepath is too long will raise here
+ except (TypeError, ValueError):
+ pass
+ return exists
+
+
+def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
+ """Whether the handle is opened in binary mode"""
+ # specified by user
+ if "t" in mode or "b" in mode:
+ return "b" in mode
+
+ # exceptions
+ text_classes = (
+ # classes that expect string but have 'b' in mode
+ codecs.StreamWriter,
+ codecs.StreamReader,
+ codecs.StreamReaderWriter,
+ )
+ if issubclass(type(handle), text_classes):
+ return False
+
+ return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
+ handle, "mode", mode
+ )
+
+
+@functools.lru_cache
+def _get_binary_io_classes() -> tuple[type, ...]:
+ """IO classes that that expect bytes"""
+ binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)
+
+ # python-zstandard doesn't use any of the builtin base classes; instead we
+ # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
+ # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
+ # so we have to get it from a `zstd.ZstdDecompressor` instance.
+ # See also https://github.com/indygreg/python-zstandard/pull/165.
+ zstd = import_optional_dependency("zstandard", errors="ignore")
+ if zstd is not None:
+ with zstd.ZstdDecompressor().stream_reader(b"") as reader:
+ binary_classes += (type(reader),)
+
+ return binary_classes
+
+
+def is_potential_multi_index(
+ columns: Sequence[Hashable] | MultiIndex,
+ index_col: bool | Sequence[int] | None = None,
+) -> bool:
+ """
+ Check whether or not the `columns` parameter
+ could be converted into a MultiIndex.
+
+ Parameters
+ ----------
+ columns : array-like
+ Object which may or may not be convertible into a MultiIndex
+ index_col : None, bool or list, optional
+ Column or columns to use as the (possibly hierarchical) index
+
+ Returns
+ -------
+ bool : Whether or not columns could become a MultiIndex
+ """
+ if index_col is None or isinstance(index_col, bool):
+ index_col = []
+
+ return bool(
+ len(columns)
+ and not isinstance(columns, ABCMultiIndex)
+ and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
+ )
+
+
+def dedup_names(
+ names: Sequence[Hashable], is_potential_multiindex: bool
+) -> Sequence[Hashable]:
+ """
+ Rename column names if duplicates exist.
+
+ Currently the renaming is done by appending a period and an autonumeric,
+ but a custom pattern may be supported in the future.
+
+ Examples
+ --------
+ >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
+ ['x', 'y', 'x.1', 'x.2']
+ """
+ names = list(names) # so we can index
+ counts: DefaultDict[Hashable, int] = defaultdict(int)
+
+ for i, col in enumerate(names):
+ cur_count = counts[col]
+
+ while cur_count > 0:
+ counts[col] = cur_count + 1
+
+ if is_potential_multiindex:
+ # for mypy
+ assert isinstance(col, tuple)
+ col = col[:-1] + (f"{col[-1]}.{cur_count}",)
+ else:
+ col = f"{col}.{cur_count}"
+ cur_count = counts[col]
+
+ names[i] = col
+ counts[col] = cur_count + 1
+
+ return names
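common.py centralizes path normalization, compression inference, and handle bookkeeping for the readers and writers above. A minimal sketch of how those internal helpers fit together, assuming the get_handle/infer_compression signatures defined in this file and an illustrative file name:

    from pandas.io.common import get_handle, infer_compression, stringify_path

    path = stringify_path("example.csv.gz")             # PathLike -> str
    assert infer_compression(path, "infer") == "gzip"   # matched via extension_to_compression

    # get_handle layers GzipFile + TextIOWrapper over the path and records every
    # buffer it created so IOHandles.close() can release them in reverse order.
    with get_handle(path, "w", compression="gzip") as handles:
        handles.handle.write("a,b\n1,2\n")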
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..275cbf0148f944eb04ca6c40c624cc5df77aa626
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__init__.py
@@ -0,0 +1,19 @@
+from pandas.io.excel._base import (
+ ExcelFile,
+ ExcelWriter,
+ read_excel,
+)
+from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
+from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
+from pandas.io.excel._util import register_writer
+from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter
+
+__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
+
+
+register_writer(_OpenpyxlWriter)
+
+register_writer(_XlsxWriter)
+
+
+register_writer(_ODSWriter)
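The register_writer calls above are what allow ExcelWriter to resolve an engine from the target file extension. A hedged sketch of that dispatch (it requires the matching optional dependency, e.g. openpyxl for .xlsx, to be installed; the file name is illustrative):

    import pandas as pd

    # ".xlsx" resolves to the openpyxl-backed writer registered above.
    with pd.ExcelWriter("report.xlsx") as writer:
        pd.DataFrame({"a": [1, 2]}).to_excel(writer, sheet_name="Sheet1", index=False)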
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20d83e9c808e9c5acc5b76b67e7b949874f8f831
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f87c9f573f76b25ba61448f260043d4fd1878abf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9f3d0f48724a32f252e0f47b1518fb287643853
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6e0ae5288264d5a777afd98073a1ccaa62df357
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..829bc763d9177b016288f2962bded93ced19b2ed
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c323db53091059797429260019fd8ad38de05073
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2b0b834e900f5efbc939b53880dfe11a8634f67
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d30f8f8c86849664fc82b80e83716bc71d6ab4a7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52c3d4cb8a4a28637ed63c75b96f35cb9d71a2dd
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81f804f2b4474c237aeb7f1e8cbb4a64a5c5fb2f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_base.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..786f719337b84a29e5b6ea7577edd412b596920f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_base.py
@@ -0,0 +1,1659 @@
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+)
+import datetime
+from functools import partial
+from io import BytesIO
+import os
+from textwrap import fill
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Generic,
+ Literal,
+ TypeVar,
+ Union,
+ cast,
+ overload,
+)
+import warnings
+import zipfile
+
+from pandas._config import config
+
+from pandas._libs import lib
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas.compat._optional import (
+ get_version,
+ import_optional_dependency,
+)
+from pandas.errors import EmptyDataError
+from pandas.util._decorators import (
+ Appender,
+ doc,
+)
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_float,
+ is_integer,
+ is_list_like,
+)
+
+from pandas.core.frame import DataFrame
+from pandas.core.shared_docs import _shared_docs
+from pandas.util.version import Version
+
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ stringify_path,
+ validate_header_arg,
+)
+from pandas.io.excel._util import (
+ fill_mi_header,
+ get_default_engine,
+ get_writer,
+ maybe_convert_usecols,
+ pop_header_name,
+)
+from pandas.io.parsers import TextParser
+from pandas.io.parsers.readers import validate_integer
+
+if TYPE_CHECKING:
+ from types import TracebackType
+
+ from pandas._typing import (
+ DtypeArg,
+ DtypeBackend,
+ ExcelWriterIfSheetExists,
+ FilePath,
+ IntStrT,
+ ReadBuffer,
+ Self,
+ SequenceNotStr,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+_read_excel_doc = (
+ """
+Read an Excel file into a ``pandas`` ``DataFrame``.
+
+Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
+read from a local filesystem or URL. Supports an option to read
+a single sheet or a list of sheets.
+
+Parameters
+----------
+io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
+
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
+
+ By file-like object, we refer to objects with a ``read()`` method,
+ such as a file handle (e.g. via builtin ``open`` function)
+ or ``StringIO``.
+
+ .. deprecated:: 2.1.0
+ Passing byte strings is deprecated. To read from a
+ byte string, wrap it in a ``BytesIO`` object.
+sheet_name : str, int, list, or None, default 0
+ Strings are used for sheet names. Integers are used in zero-indexed
+ sheet positions (chart sheets do not count as a sheet position).
+ Lists of strings/integers are used to request multiple sheets.
+ Specify ``None`` to get all worksheets.
+
+ Available cases:
+
+ * Defaults to ``0``: 1st sheet as a `DataFrame`
+ * ``1``: 2nd sheet as a `DataFrame`
+ * ``"Sheet1"``: Load sheet with name "Sheet1"
+ * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
+ as a dict of `DataFrame`
+ * ``None``: All worksheets.
+
+header : int, list of int, default 0
+ Row (0-indexed) to use for the column labels of the parsed
+ DataFrame. If a list of integers is passed those row positions will
+ be combined into a ``MultiIndex``. Use None if there is no header.
+names : array-like, default None
+ List of column names to use. If file contains no header row,
+ then you should explicitly pass header=None.
+index_col : int, str, list of int, default None
+ Column (0-indexed) to use as the row labels of the DataFrame.
+ Pass None if there is no such column. If a list is passed,
+ those columns will be combined into a ``MultiIndex``. If a
+ subset of data is selected with ``usecols``, index_col
+ is based on the subset.
+
+ Missing values will be forward filled to allow roundtripping with
+ ``to_excel`` for ``merged_cells=True``. To avoid forward filling the
+ missing values use ``set_index`` after reading the data instead of
+ ``index_col``.
+usecols : str, list-like, or callable, default None
+ * If None, then parse all columns.
+ * If str, then indicates comma separated list of Excel column letters
+ and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
+ both sides.
+ * If list of int, then indicates list of column numbers to be parsed
+ (0-indexed).
+ * If list of string, then indicates list of column names to be parsed.
+ * If callable, then evaluate each column name against it and parse the
+ column if the callable returns ``True``.
+
+ Returns a subset of the columns according to behavior above.
+dtype : Type name or dict of column -> type, default None
+ Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
+ Use ``object`` to preserve data as stored in Excel and not interpret dtype,
+ which will necessarily result in ``object`` dtype.
+ If converters are specified, they will be applied INSTEAD
+ of dtype conversion.
+ If you use ``None``, it will infer the dtype of each column based on the data.
+engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Engine compatibility :
+
+ - ``openpyxl`` supports newer Excel file formats.
+ - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
+ and OpenDocument (.ods) file formats.
+ - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
+ - ``pyxlsb`` supports Binary Excel files.
+ - ``xlrd`` supports old-style Excel files (.xls).
+
+ When ``engine=None``, the following logic will be used to determine the engine:
+
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
+ then ``odf`` will be used.
+ - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.
+ - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.
+ - Otherwise ``openpyxl`` will be used.
+converters : dict, default None
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels, values are functions that take one
+ input argument, the Excel cell content, and return the transformed
+ content.
+true_values : list, default None
+ Values to consider as True.
+false_values : list, default None
+ Values to consider as False.
+skiprows : list-like, int, or callable, optional
+ Line numbers to skip (0-indexed) or number of lines to skip (int) at the
+ start of the file. If callable, the callable function will be evaluated
+ against the row indices, returning True if the row should be skipped and
+ False otherwise. An example of a valid callable argument would be ``lambda
+ x: x in [0, 2]``.
+nrows : int, default None
+ Number of rows to parse.
+na_values : scalar, str, list-like, or dict, default None
+ Additional strings to recognize as NA/NaN. If dict passed, specific
+ per-column NA values. By default the following values are interpreted
+ as NaN: '"""
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ + """'.
+keep_default_na : bool, default True
+ Whether or not to include the default NaN values when parsing the data.
+ Depending on whether ``na_values`` is passed in, the behavior is as follows:
+
+ * If ``keep_default_na`` is True, and ``na_values`` are specified,
+ ``na_values`` is appended to the default NaN values used for parsing.
+ * If ``keep_default_na`` is True, and ``na_values`` are not specified, only
+ the default NaN values are used for parsing.
+ * If ``keep_default_na`` is False, and ``na_values`` are specified, only
+ the NaN values specified ``na_values`` are used for parsing.
+ * If ``keep_default_na`` is False, and ``na_values`` are not specified, no
+ strings will be parsed as NaN.
+
+ Note that if `na_filter` is passed in as False, the ``keep_default_na`` and
+ ``na_values`` parameters will be ignored.
+na_filter : bool, default True
+ Detect missing value markers (empty strings and the value of na_values). In
+ data without any NAs, passing ``na_filter=False`` can improve the
+ performance of reading a large file.
+verbose : bool, default False
+ Indicate number of NA values placed in non-numeric columns.
+parse_dates : bool, list-like, or dict, default False
+ The behavior is as follows:
+
+ * ``bool``. If True -> try parsing the index.
+ * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+ each as a separate date column.
+ * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+ a single date column.
+ * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+ result 'foo'
+
+ If a column or index contains an unparsable date, the entire column or
+ index will be returned unaltered as an object data type. If you don't want to
+ parse some cells as dates, just change their type in Excel to "Text".
+ For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
+
+ Note: A fast-path exists for iso8601-formatted dates.
+date_parser : function, optional
+ Function to use for converting a sequence of string columns to an array of
+ datetime instances. The default uses ``dateutil.parser.parser`` to do the
+ conversion. Pandas will try to call `date_parser` in three different ways,
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
+ (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
+ string values from the columns defined by `parse_dates` into a single array
+ and pass that; and 3) call `date_parser` once for each row using one or
+ more strings (corresponding to the columns defined by `parse_dates`) as
+ arguments.
+
+ .. deprecated:: 2.0.0
+ Use ``date_format`` instead, or read in as ``object`` and then apply
+ :func:`to_datetime` as-needed.
+date_format : str or dict of column -> format, default ``None``
+ If used in conjunction with ``parse_dates``, will parse dates according to this
+ format. For anything more complex,
+ please read in as ``object`` and then apply :func:`to_datetime` as-needed.
+
+ .. versionadded:: 2.0.0
+thousands : str, default None
+ Thousands separator for parsing string columns to numeric. Note that
+ this parameter is only necessary for columns stored as TEXT in Excel,
+ any numeric columns will automatically be parsed, regardless of display
+ format.
+decimal : str, default '.'
+ Character to recognize as decimal point for parsing string columns to numeric.
+ Note that this parameter is only necessary for columns stored as TEXT in Excel,
+ any numeric columns will automatically be parsed, regardless of display
+ format (e.g. use ',' for European data).
+
+ .. versionadded:: 1.4.0
+
+comment : str, default None
+ Comments out remainder of line. Pass a character or characters to this
+ argument to indicate comments in the input file. Any data between the
+ comment string and the end of the current line is ignored.
+skipfooter : int, default 0
+ Rows at the end to skip (0-indexed).
+{storage_options}
+
+dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+
+Returns
+-------
+DataFrame or dict of DataFrames
+ DataFrame from the passed in Excel file. See notes in sheet_name
+ argument for more information on when a dict of DataFrames is returned.
+
+See Also
+--------
+DataFrame.to_excel : Write DataFrame to an Excel file.
+DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+read_csv : Read a comma-separated values (csv) file into DataFrame.
+read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+Notes
+-----
+For specific information on the methods used for each Excel engine, refer to the
+pandas user guide on reading Excel files.
+
+Examples
+--------
+The file can be read using the file name as string or an open file object:
+
+>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
+ Name Value
+0 string1 1
+1 string2 2
+2 #Comment 3
+
+>>> pd.read_excel(open('tmp.xlsx', 'rb'),
+... sheet_name='Sheet3') # doctest: +SKIP
+ Unnamed: 0 Name Value
+0 0 string1 1
+1 1 string2 2
+2 2 #Comment 3
+
+Index and header can be specified via the `index_col` and `header` arguments
+
+>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
+ 0 1 2
+0 NaN Name Value
+1 0.0 string1 1
+2 1.0 string2 2
+3 2.0 #Comment 3
+
+Column types are inferred but can be explicitly specified
+
+>>> pd.read_excel('tmp.xlsx', index_col=0,
+... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
+ Name Value
+0 string1 1.0
+1 string2 2.0
+2 #Comment 3.0
+
+True, False, and NA values, and thousands separators have defaults,
+but can be explicitly specified, too. Supply the values you would like
+as strings or lists of strings!
+
+>>> pd.read_excel('tmp.xlsx', index_col=0,
+... na_values=['string1', 'string2']) # doctest: +SKIP
+ Name Value
+0 NaN 1
+1 NaN 2
+2 #Comment 3
+
+Comment lines in the excel input file can be skipped using the
+``comment`` kwarg.
+
+>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
+ Name Value
+0 string1 1.0
+1 string2 2.0
+2 None NaN
+"""
+)
+
+
+@overload
+def read_excel(
+ io,
+ # sheet name is str or int -> DataFrame
+ sheet_name: str | int = ...,
+ *,
+ header: int | Sequence[int] | None = ...,
+ names: SequenceNotStr[Hashable] | range | None = ...,
+ index_col: int | str | Sequence[int] | None = ...,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = ...,
+ dtype: DtypeArg | None = ...,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
+ true_values: Iterable[Hashable] | None = ...,
+ false_values: Iterable[Hashable] | None = ...,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
+ nrows: int | None = ...,
+ na_values=...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool = ...,
+ parse_dates: list | dict | bool = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: dict[Hashable, str] | str | None = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ comment: str | None = ...,
+ skipfooter: int = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+@overload
+def read_excel(
+ io,
+ # sheet name is list or None -> dict[IntStrT, DataFrame]
+ sheet_name: list[IntStrT] | None,
+ *,
+ header: int | Sequence[int] | None = ...,
+ names: SequenceNotStr[Hashable] | range | None = ...,
+ index_col: int | str | Sequence[int] | None = ...,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = ...,
+ dtype: DtypeArg | None = ...,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
+ true_values: Iterable[Hashable] | None = ...,
+ false_values: Iterable[Hashable] | None = ...,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
+ nrows: int | None = ...,
+ na_values=...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool = ...,
+ parse_dates: list | dict | bool = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: dict[Hashable, str] | str | None = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ comment: str | None = ...,
+ skipfooter: int = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> dict[IntStrT, DataFrame]:
+ ...
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+@Appender(_read_excel_doc)
+def read_excel(
+ io,
+ sheet_name: str | int | list[IntStrT] | None = 0,
+ *,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | str | Sequence[int] | None = None,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = None,
+ dtype: DtypeArg | None = None,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,
+ converters: dict[str, Callable] | dict[int, Callable] | None = None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool = False,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: dict[Hashable, str] | str | None = None,
+ thousands: str | None = None,
+ decimal: str = ".",
+ comment: str | None = None,
+ skipfooter: int = 0,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ engine_kwargs: dict | None = None,
+) -> DataFrame | dict[IntStrT, DataFrame]:
+ check_dtype_backend(dtype_backend)
+ should_close = False
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
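+ # Track whether this call created the ExcelFile; only handles opened
+ # here are closed in the ``finally`` block below.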
+ if not isinstance(io, ExcelFile):
+ should_close = True
+ io = ExcelFile(
+ io,
+ storage_options=storage_options,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ )
+ elif engine and engine != io.engine:
+ raise ValueError(
+ "Engine should not be specified when passing "
+ "an ExcelFile - ExcelFile already has the engine set"
+ )
+
+ try:
+ data = io.parse(
+ sheet_name=sheet_name,
+ header=header,
+ names=names,
+ index_col=index_col,
+ usecols=usecols,
+ dtype=dtype,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ keep_default_na=keep_default_na,
+ na_filter=na_filter,
+ verbose=verbose,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ decimal=decimal,
+ comment=comment,
+ skipfooter=skipfooter,
+ dtype_backend=dtype_backend,
+ )
+ finally:
+ # make sure to close opened file handles
+ if should_close:
+ io.close()
+ return data
+
+
+_WorkbookT = TypeVar("_WorkbookT")
+
+
+class BaseExcelReader(Generic[_WorkbookT]):
+ book: _WorkbookT
+
+ def __init__(
+ self,
+ filepath_or_buffer,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ # First argument can also be bytes, so create a buffer
+ if isinstance(filepath_or_buffer, bytes):
+ filepath_or_buffer = BytesIO(filepath_or_buffer)
+
+ self.handles = IOHandles(
+ handle=filepath_or_buffer, compression={"method": None}
+ )
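+ # Only open a new handle for a path or raw buffer; an ExcelFile or an
+ # already-open workbook instance is used through the wrapper above.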
+ if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
+ self.handles = get_handle(
+ filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
+ )
+
+ if isinstance(self.handles.handle, self._workbook_class):
+ self.book = self.handles.handle
+ elif hasattr(self.handles.handle, "read"):
+ # N.B. xlrd.Book has a read attribute too
+ self.handles.handle.seek(0)
+ try:
+ self.book = self.load_workbook(self.handles.handle, engine_kwargs)
+ except Exception:
+ self.close()
+ raise
+ else:
+ raise ValueError(
+ "Must explicitly set engine if not passing in buffer or path for io."
+ )
+
+ @property
+ def _workbook_class(self) -> type[_WorkbookT]:
+ raise NotImplementedError
+
+ def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:
+ raise NotImplementedError
+
+ def close(self) -> None:
+ if hasattr(self, "book"):
+ if hasattr(self.book, "close"):
+ # pyxlsb: opens a TemporaryFile
+ # openpyxl: https://stackoverflow.com/questions/31416842/
+ # openpyxl-does-not-close-excel-workbook-in-read-only-mode
+ self.book.close()
+ elif hasattr(self.book, "release_resources"):
+ # xlrd
+ # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
+ self.book.release_resources()
+ self.handles.close()
+
+ @property
+ def sheet_names(self) -> list[str]:
+ raise NotImplementedError
+
+ def get_sheet_by_name(self, name: str):
+ raise NotImplementedError
+
+ def get_sheet_by_index(self, index: int):
+ raise NotImplementedError
+
+ def get_sheet_data(self, sheet, rows: int | None = None):
+ raise NotImplementedError
+
+ def raise_if_bad_sheet_by_index(self, index: int) -> None:
+ n_sheets = len(self.sheet_names)
+ if index >= n_sheets:
+ raise ValueError(
+ f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
+ )
+
+ def raise_if_bad_sheet_by_name(self, name: str) -> None:
+ if name not in self.sheet_names:
+ raise ValueError(f"Worksheet named '{name}' not found")
+
+ def _check_skiprows_func(
+ self,
+ skiprows: Callable,
+ rows_to_use: int,
+ ) -> int:
+ """
+ Determine how many file rows are required to obtain `nrows` data
+ rows when `skiprows` is a function.
+
+ Parameters
+ ----------
+ skiprows : function
+ The function passed to read_excel by the user.
+ rows_to_use : int
+ The number of rows that will be needed for the header and
+ the data.
+
+ Returns
+ -------
+ int
+ """
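+ # Walk over file rows one at a time; a row only counts toward the
+ # required total when skiprows(i) is falsy, i.e. the row is kept.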
+ i = 0
+ rows_used_so_far = 0
+ while rows_used_so_far < rows_to_use:
+ if not skiprows(i):
+ rows_used_so_far += 1
+ i += 1
+ return i
+
+ def _calc_rows(
+ self,
+ header: int | Sequence[int] | None,
+ index_col: int | Sequence[int] | None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None,
+ nrows: int | None,
+ ) -> int | None:
+ """
+ If nrows specified, find the number of rows needed from the
+ file, otherwise return None.
+
+ Parameters
+ ----------
+ header : int, list of int, or None
+ See read_excel docstring.
+ index_col : int, str, list of int, or None
+ See read_excel docstring.
+ skiprows : list-like, int, callable, or None
+ See read_excel docstring.
+ nrows : int or None
+ See read_excel docstring.
+
+ Returns
+ -------
+ int or None
+ """
+ if nrows is None:
+ return None
+ if header is None:
+ header_rows = 1
+ elif is_integer(header):
+ header = cast(int, header)
+ header_rows = 1 + header
+ else:
+ header = cast(Sequence, header)
+ header_rows = 1 + header[-1]
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ if is_list_like(header) and index_col is not None:
+ header = cast(Sequence, header)
+ if len(header) > 1:
+ header_rows += 1
+ if skiprows is None:
+ return header_rows + nrows
+ if is_integer(skiprows):
+ skiprows = cast(int, skiprows)
+ return header_rows + nrows + skiprows
+ if is_list_like(skiprows):
+
+ def f(skiprows: Sequence, x: int) -> bool:
+ return x in skiprows
+
+ skiprows = cast(Sequence, skiprows)
+ return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
+ if callable(skiprows):
+ return self._check_skiprows_func(
+ skiprows,
+ header_rows + nrows,
+ )
+ # else unexpected skiprows type: read_excel will not optimize
+ # the number of rows read from file
+ return None
+
+ def parse(
+ self,
+ sheet_name: str | int | list[int] | list[str] | None = 0,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | Sequence[int] | None = None,
+ usecols=None,
+ dtype: DtypeArg | None = None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ verbose: bool = False,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: dict[Hashable, str] | str | None = None,
+ thousands: str | None = None,
+ decimal: str = ".",
+ comment: str | None = None,
+ skipfooter: int = 0,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwds,
+ ):
+ validate_header_arg(header)
+ validate_integer("nrows", nrows)
+
+ ret_dict = False
+
+ # Keep sheetname to maintain backwards compatibility.
+ sheets: list[int] | list[str]
+ if isinstance(sheet_name, list):
+ sheets = sheet_name
+ ret_dict = True
+ elif sheet_name is None:
+ sheets = self.sheet_names
+ ret_dict = True
+ elif isinstance(sheet_name, str):
+ sheets = [sheet_name]
+ else:
+ sheets = [sheet_name]
+
+ # handle same-type duplicates.
+ sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))
+
+ output = {}
+
+ last_sheetname = None
+ for asheetname in sheets:
+ last_sheetname = asheetname
+ if verbose:
+ print(f"Reading sheet {asheetname}")
+
+ if isinstance(asheetname, str):
+ sheet = self.get_sheet_by_name(asheetname)
+ else: # assume an integer if not a string
+ sheet = self.get_sheet_by_index(asheetname)
+
+ file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
+ data = self.get_sheet_data(sheet, file_rows_needed)
+ if hasattr(sheet, "close"):
+ # pyxlsb opens two TemporaryFiles
+ sheet.close()
+ usecols = maybe_convert_usecols(usecols)
+
+ if not data:
+ output[asheetname] = DataFrame()
+ continue
+
+ is_list_header = False
+ is_len_one_list_header = False
+ if is_list_like(header):
+ assert isinstance(header, Sequence)
+ is_list_header = True
+ if len(header) == 1:
+ is_len_one_list_header = True
+
+ if is_len_one_list_header:
+ header = cast(Sequence[int], header)[0]
+
+ # forward fill and pull out names for MultiIndex column
+ header_names = None
+ if header is not None and is_list_like(header):
+ assert isinstance(header, Sequence)
+
+ header_names = []
+ control_row = [True] * len(data[0])
+
+ for row in header:
+ if is_integer(skiprows):
+ assert isinstance(skiprows, int)
+ row += skiprows
+
+ if row > len(data) - 1:
+ raise ValueError(
+ f"header index {row} exceeds maximum index "
+ f"{len(data) - 1} of data.",
+ )
+
+ data[row], control_row = fill_mi_header(data[row], control_row)
+
+ if index_col is not None:
+ header_name, _ = pop_header_name(data[row], index_col)
+ header_names.append(header_name)
+
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ has_index_names = False
+ if is_list_header and not is_len_one_list_header and index_col is not None:
+ index_col_list: Sequence[int]
+ if isinstance(index_col, int):
+ index_col_list = [index_col]
+ else:
+ assert isinstance(index_col, Sequence)
+ index_col_list = index_col
+
+ # We have to handle mi without names. If any of the entries in the data
+ # columns are not empty, this is a regular row
+ assert isinstance(header, Sequence)
+ if len(header) < len(data):
+ potential_index_names = data[len(header)]
+ potential_data = [
+ x
+ for i, x in enumerate(potential_index_names)
+ if not control_row[i] and i not in index_col_list
+ ]
+ has_index_names = all(x == "" or x is None for x in potential_data)
+
+ if is_list_like(index_col):
+ # Forward fill values for MultiIndex index.
+ if header is None:
+ offset = 0
+ elif isinstance(header, int):
+ offset = 1 + header
+ else:
+ offset = 1 + max(header)
+
+ # GH34673: if MultiIndex names present and not defined in the header,
+ # offset needs to be incremented so that forward filling starts
+ # from the first MI value instead of the name
+ if has_index_names:
+ offset += 1
+
+ # Check if we have an empty dataset
+ # before trying to collect data.
+ if offset < len(data):
+ assert isinstance(index_col, Sequence)
+
+ for col in index_col:
+ last = data[offset][col]
+
+ for row in range(offset + 1, len(data)):
+ if data[row][col] == "" or data[row][col] is None:
+ data[row][col] = last
+ else:
+ last = data[row][col]
+
+ # GH 12292 : error when read one empty column from excel file
+ try:
+ parser = TextParser(
+ data,
+ names=names,
+ header=header,
+ index_col=index_col,
+ has_index_names=has_index_names,
+ dtype=dtype,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ skip_blank_lines=False, # GH 39808
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ decimal=decimal,
+ comment=comment,
+ skipfooter=skipfooter,
+ usecols=usecols,
+ dtype_backend=dtype_backend,
+ **kwds,
+ )
+
+ output[asheetname] = parser.read(nrows=nrows)
+
+ if header_names:
+ output[asheetname].columns = output[asheetname].columns.set_names(
+ header_names
+ )
+
+ except EmptyDataError:
+ # No Data, return an empty DataFrame
+ output[asheetname] = DataFrame()
+
+ except Exception as err:
+ err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
+ raise err
+
+ if last_sheetname is None:
+ raise ValueError("Sheet name is an empty list")
+
+ if ret_dict:
+ return output
+ else:
+ return output[last_sheetname]
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+class ExcelWriter(Generic[_WorkbookT]):
+ """
+ Class for writing DataFrame objects into excel sheets.
+
+ Default is to use:
+
+ * ``xlsxwriter`` for xlsx files if xlsxwriter is installed,
+ otherwise ``openpyxl``
+ * ``odswriter`` for ods files
+
+ See ``DataFrame.to_excel`` for typical usage.
+
+ The writer should be used as a context manager. Otherwise, call `close()` to save
+ and close any opened file handles.
+
+ Parameters
+ ----------
+ path : str or typing.BinaryIO
+ Path to xls or xlsx or ods file.
+ engine : str (optional)
+ Engine to use for writing. If None, defaults to
+ ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
+ argument.
+ date_format : str, default None
+ Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
+ datetime_format : str, default None
+ Format string for datetime objects written into Excel files.
+ (e.g. 'YYYY-MM-DD HH:MM:SS').
+ mode : {{'w', 'a'}}, default 'w'
+ File mode to use (write or append). Append does not work with fsspec URLs.
+ {storage_options}
+
+ if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
+ How to behave when trying to write to a sheet that already
+ exists (append mode only).
+
+ * error: raise a ValueError.
+ * new: Create a new sheet, with a name determined by the engine.
+ * replace: Delete the contents of the sheet before writing to it.
+ * overlay: Write contents to the existing sheet without first removing,
+ but possibly over top of, the existing contents.
+
+ .. versionadded:: 1.3.0
+
+ .. versionchanged:: 1.4.0
+
+ Added ``overlay`` option
+
+ engine_kwargs : dict, optional
+ Keyword arguments to be passed into the engine. These will be passed to
+ the following functions of the respective engines:
+
+ * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
+ * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
+ * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
+ * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
+
+ .. versionadded:: 1.3.0
+
+ Notes
+ -----
+ For compatibility with CSV writers, ExcelWriter serializes lists
+ and dicts to strings before writing.
+
+ Examples
+ --------
+ Default usage:
+
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ To write to separate sheets in a single file:
+
+ >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
+ >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
+ ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
+ ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
+
+ You can set the date format or datetime format:
+
+ >>> from datetime import date, datetime # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... [
+ ... [date(2014, 1, 31), date(1999, 9, 24)],
+ ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
+ ... ],
+ ... index=["Date", "Datetime"],
+ ... columns=["X", "Y"],
+ ... ) # doctest: +SKIP
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... date_format="YYYY-MM-DD",
+ ... datetime_format="YYYY-MM-DD HH:MM:SS"
+ ... ) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ You can also append to an existing Excel file:
+
+ >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
+ ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
+
+ Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
+ already exists:
+
+ >>> with ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="replace",
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
+
+ You can also write multiple DataFrames to a single sheet. Note that the
+ ``if_sheet_exists`` parameter needs to be set to ``overlay``:
+
+ >>> with ExcelWriter("path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="overlay",
+ ... ) as writer:
+ ... df1.to_excel(writer, sheet_name="Sheet1")
+ ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
+
+ You can store the Excel file in RAM:
+
+ >>> import io
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
+ >>> buffer = io.BytesIO()
+ >>> with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer)
+
+ You can pack the Excel file into a zip archive:
+
+ >>> import zipfile # doctest: +SKIP
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
+ ... with zf.open("filename.xlsx", "w") as buffer:
+ ... with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ You can specify additional arguments to the underlying engine:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="xlsxwriter",
+ ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
+ ... ) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ In append mode, ``engine_kwargs`` are passed through to
+ openpyxl's ``load_workbook``:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="openpyxl",
+ ... mode="a",
+ ... engine_kwargs={{"keep_vba": True}}
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
+ """
+
+ # Defining an ExcelWriter implementation (see abstract methods for more...)
+
+ # - Mandatory
+ # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
+ # --> called to write additional DataFrames to disk
+ # - ``_supported_extensions`` (tuple of supported extensions), used to
+ # check that engine supports the given extension.
+ # - ``_engine`` - string that gives the engine name. Necessary to
+ # instantiate class directly and bypass ``ExcelWriterMeta`` engine
+ # lookup.
+ # - ``save(self)`` --> called to save file to disk
+ # - Mostly mandatory (i.e. should at least exist)
+ # - book, cur_sheet, path
+
+ # - Optional:
+ # - ``__init__(self, path, engine=None, **kwargs)`` --> always called
+ # with path as first argument.
+
+ # You also need to register the class with ``register_writer()``.
+ # Technically, ExcelWriter implementations don't need to subclass
+ # ExcelWriter.
+
+ _engine: str
+ _supported_extensions: tuple[str, ...]
+
+ def __new__(
+ cls,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> Self:
+ # only switch class if generic(ExcelWriter)
+ if cls is ExcelWriter:
+ if engine is None or (isinstance(engine, str) and engine == "auto"):
+ if isinstance(path, str):
+ ext = os.path.splitext(path)[-1][1:]
+ else:
+ ext = "xlsx"
+
+ try:
+ engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
+ if engine == "auto":
+ engine = get_default_engine(ext, mode="writer")
+ except KeyError as err:
+ raise ValueError(f"No engine for filetype: '{ext}'") from err
+
+ # for mypy
+ assert engine is not None
+ # error: Incompatible types in assignment (expression has type
+ # "type[ExcelWriter[Any]]", variable has type "type[Self]")
+ cls = get_writer(engine) # type: ignore[assignment]
+
+ return object.__new__(cls)
+
+ # declare external properties you can count on
+ _path = None
+
+ @property
+ def supported_extensions(self) -> tuple[str, ...]:
+ """Extensions that writer engine supports."""
+ return self._supported_extensions
+
+ @property
+ def engine(self) -> str:
+ """Name of engine."""
+ return self._engine
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ raise NotImplementedError
+
+ @property
+ def book(self) -> _WorkbookT:
+ """
+ Book instance. Class type will depend on the engine used.
+
+ This attribute can be used to access engine-specific features.
+ """
+ raise NotImplementedError
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ """
+ Write given formatted cells into an Excel sheet.
+
+ Parameters
+ ----------
+ cells : generator
+ cell of formatted data to save to Excel sheet
+ sheet_name : str, default None
+ Name of Excel sheet, if None, then use self.cur_sheet
+ startrow : upper left cell row to dump data frame
+ startcol : upper left cell column to dump data frame
+ freeze_panes : int tuple of length 2
+ contains the bottom-most row and right-most column to freeze
+ """
+ raise NotImplementedError
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ raise NotImplementedError
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ ) -> None:
+ # validate that this engine can handle the extension
+ if isinstance(path, str):
+ ext = os.path.splitext(path)[-1]
+ self.check_extension(ext)
+
+ # use mode to open the file
+ if "b" not in mode:
+ mode += "b"
+ # use "a" for the user to append data to excel but internally use "r+" to let
+ # the excel backend first read the existing file and then write any data to it
+ mode = mode.replace("a", "r+")
+
+ if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
+ raise ValueError(
+ f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
+ )
+ if if_sheet_exists and "r+" not in mode:
+ raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
+ if if_sheet_exists is None:
+ if_sheet_exists = "error"
+ self._if_sheet_exists = if_sheet_exists
+
+ # cast ExcelWriter to avoid adding 'if self._handles is not None'
+ self._handles = IOHandles(
+ cast(IO[bytes], path), compression={"compression": None}
+ )
+ if not isinstance(path, ExcelWriter):
+ self._handles = get_handle(
+ path, mode, storage_options=storage_options, is_text=False
+ )
+ self._cur_sheet = None
+
+ if date_format is None:
+ self._date_format = "YYYY-MM-DD"
+ else:
+ self._date_format = date_format
+ if datetime_format is None:
+ self._datetime_format = "YYYY-MM-DD HH:MM:SS"
+ else:
+ self._datetime_format = datetime_format
+
+ self._mode = mode
+
+ @property
+ def date_format(self) -> str:
+ """
+ Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
+ """
+ return self._date_format
+
+ @property
+ def datetime_format(self) -> str:
+ """
+ Format string for datetime objects written into Excel files (e.g. 'YYYY-MM-DD HH:MM:SS').
+ """
+ return self._datetime_format
+
+ @property
+ def if_sheet_exists(self) -> str:
+ """
+ How to behave when writing to a sheet that already exists in append mode.
+ """
+ return self._if_sheet_exists
+
+ def __fspath__(self) -> str:
+ return getattr(self._handles.handle, "name", "")
+
+ def _get_sheet_name(self, sheet_name: str | None) -> str:
+ if sheet_name is None:
+ sheet_name = self._cur_sheet
+ if sheet_name is None: # pragma: no cover
+ raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
+ return sheet_name
+
+ def _value_with_fmt(
+ self, val
+ ) -> tuple[
+ int | float | bool | str | datetime.datetime | datetime.date, str | None
+ ]:
+ """
+ Convert numpy types to Python types for the Excel writers.
+
+ Parameters
+ ----------
+ val : object
+ Value to be written into cells
+
+ Returns
+ -------
+ Tuple with the first element being the converted value and the second
+ being an optional format
+ """
+ fmt = None
+
+ if is_integer(val):
+ val = int(val)
+ elif is_float(val):
+ val = float(val)
+ elif is_bool(val):
+ val = bool(val)
+ elif isinstance(val, datetime.datetime):
+ fmt = self._datetime_format
+ elif isinstance(val, datetime.date):
+ fmt = self._date_format
+ elif isinstance(val, datetime.timedelta):
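+ # Excel stores durations as fractional days, so convert seconds to
+ # days (86400 seconds per day) and format as a plain number.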
+ val = val.total_seconds() / 86400
+ fmt = "0"
+ else:
+ val = str(val)
+
+ return val, fmt
+
+ @classmethod
+ def check_extension(cls, ext: str) -> Literal[True]:
+ """
+ Checks the path's extension against the writer's supported
+ extensions; raises a ``ValueError`` if it is not supported.
+ """
+ if ext.startswith("."):
+ ext = ext[1:]
+ if not any(ext in extension for extension in cls._supported_extensions):
+ raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
+ return True
+
+ # Allow use as a contextmanager
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+ def close(self) -> None:
+ """synonym for save, to make it more file-like"""
+ self._save()
+ self._handles.close()
+
+
+XLS_SIGNATURES = (
+ b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2
+ b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3
+ b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4
+ b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary
+)
+ZIP_SIGNATURE = b"PK\x03\x04"
+PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def inspect_excel_format(
+ content_or_path: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+) -> str | None:
+ """
+ Inspect the path or content of an excel file and get its format.
+
+ Adopted from xlrd: https://github.com/python-excel/xlrd.
+
+ Parameters
+ ----------
+ content_or_path : str or file-like object
+ Path to file or content of file to inspect. May be a URL.
+ {storage_options}
+
+ Returns
+ -------
+ str or None
+ Format of file if it can be determined.
+
+ Raises
+ ------
+ ValueError
+ If resulting stream is empty.
+ BadZipFile
+ If resulting stream does not have an XLS signature and is not a valid zipfile.
+ """
+ if isinstance(content_or_path, bytes):
+ content_or_path = BytesIO(content_or_path)
+
+ with get_handle(
+ content_or_path, "rb", storage_options=storage_options, is_text=False
+ ) as handle:
+ stream = handle.handle
+ stream.seek(0)
+ buf = stream.read(PEEK_SIZE)
+ if buf is None:
+ raise ValueError("stream is empty")
+ assert isinstance(buf, bytes)
+ peek = buf
+ stream.seek(0)
+
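+ # Legacy binary .xls files are identified by their BIFF/CFB signatures.
+ # Anything that is not a ZIP container is unrecognised; otherwise the
+ # ZIP member names below tell xlsx, xlsb and ods apart.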
+ if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
+ return "xls"
+ elif not peek.startswith(ZIP_SIGNATURE):
+ return None
+
+ with zipfile.ZipFile(stream) as zf:
+ # Workaround for some third party files that use forward slashes and
+ # lower case names.
+ component_names = [
+ name.replace("\\", "/").lower() for name in zf.namelist()
+ ]
+
+ if "xl/workbook.xml" in component_names:
+ return "xlsx"
+ if "xl/workbook.bin" in component_names:
+ return "xlsb"
+ if "content.xml" in component_names:
+ return "ods"
+ return "zip"
+
+
+class ExcelFile:
+ """
+ Class for parsing tabular Excel sheets into DataFrame objects.
+
+ See read_excel for more documentation.
+
+ Parameters
+ ----------
+ path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
+ A file-like object, xlrd workbook or openpyxl workbook.
+ If a string or path object, expected to be a path to a
+ .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
+ engine : str, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
+ Engine compatibility :
+
+ - ``xlrd`` supports old-style Excel files (.xls).
+ - ``openpyxl`` supports newer Excel file formats.
+ - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
+ - ``pyxlsb`` supports Binary Excel files.
+ - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
+ and OpenDocument (.ods) file formats.
+
+ .. versionchanged:: 1.2.0
+
+ The engine ``xlrd``
+ now only supports old-style ``.xls`` files.
+ When ``engine=None``, the following logic will be
+ used to determine the engine:
+
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
+ then ``odf`` will be used.
+ - Otherwise if ``path_or_buffer`` is an xls format,
+ ``xlrd`` will be used.
+ - Otherwise if ``path_or_buffer`` is in xlsb format,
+ ``pyxlsb`` will be used.
+
+ .. versionadded:: 1.3.0
+
+ - Otherwise if ``openpyxl`` is installed,
+ then ``openpyxl`` will be used.
+ - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
+
+ .. warning::
+
+ Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
+ This is not supported, switch to using ``openpyxl`` instead.
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+
+ Examples
+ --------
+ >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
+ ... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
+ """
+
+ from pandas.io.excel._calamine import CalamineReader
+ from pandas.io.excel._odfreader import ODFReader
+ from pandas.io.excel._openpyxl import OpenpyxlReader
+ from pandas.io.excel._pyxlsb import PyxlsbReader
+ from pandas.io.excel._xlrd import XlrdReader
+
+ _engines: Mapping[str, Any] = {
+ "xlrd": XlrdReader,
+ "openpyxl": OpenpyxlReader,
+ "odf": ODFReader,
+ "pyxlsb": PyxlsbReader,
+ "calamine": CalamineReader,
+ }
+
+ def __init__(
+ self,
+ path_or_buffer,
+ engine: str | None = None,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ if engine is not None and engine not in self._engines:
+ raise ValueError(f"Unknown engine: {engine}")
+
+ # First argument can also be bytes, so create a buffer
+ if isinstance(path_or_buffer, bytes):
+ path_or_buffer = BytesIO(path_or_buffer)
+ warnings.warn(
+ "Passing bytes to 'read_excel' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "byte string, wrap it in a `BytesIO` object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ # Could be a str, ExcelFile, Book, etc.
+ self.io = path_or_buffer
+ # Always a string
+ self._io = stringify_path(path_or_buffer)
+
+ # Determine xlrd version if installed
+ if import_optional_dependency("xlrd", errors="ignore") is None:
+ xlrd_version = None
+ else:
+ import xlrd
+
+ xlrd_version = Version(get_version(xlrd))
+
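+ # When no engine is given, sniff the file format and use the
+ # io.excel.<extension>.reader option to pick a reader.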
+ if engine is None:
+ # Only determine ext if it is needed
+ ext: str | None
+ if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
+ ext = "xls"
+ else:
+ ext = inspect_excel_format(
+ content_or_path=path_or_buffer, storage_options=storage_options
+ )
+ if ext is None:
+ raise ValueError(
+ "Excel file format cannot be determined, you must specify "
+ "an engine manually."
+ )
+
+ engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
+ if engine == "auto":
+ engine = get_default_engine(ext, mode="reader")
+
+ assert engine is not None
+ self.engine = engine
+ self.storage_options = storage_options
+
+ self._reader = self._engines[engine](
+ self._io,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ def __fspath__(self):
+ return self._io
+
+ def parse(
+ self,
+ sheet_name: str | int | list[int] | list[str] | None = 0,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | Sequence[int] | None = None,
+ usecols=None,
+ converters=None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ thousands: str | None = None,
+ comment: str | None = None,
+ skipfooter: int = 0,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwds,
+ ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
+ """
+ Parse specified sheet(s) into a DataFrame.
+
+ Equivalent to read_excel(ExcelFile, ...). See the read_excel
+ docstring for more info on accepted parameters.
+
+ Returns
+ -------
+ DataFrame or dict of DataFrames
+ DataFrame from the passed in Excel file.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+ >>> df.to_excel('myfile.xlsx') # doctest: +SKIP
+ >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> file.parse() # doctest: +SKIP
+ """
+ return self._reader.parse(
+ sheet_name=sheet_name,
+ header=header,
+ names=names,
+ index_col=index_col,
+ usecols=usecols,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ comment=comment,
+ skipfooter=skipfooter,
+ dtype_backend=dtype_backend,
+ **kwds,
+ )
+
+ @property
+ def book(self):
+ return self._reader.book
+
+ @property
+ def sheet_names(self):
+ return self._reader.sheet_names
+
+ def close(self) -> None:
+ """close io if necessary"""
+ self._reader.close()
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_calamine.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
new file mode 100644
index 0000000000000000000000000000000000000000..5259469f7a569a1913aa49635b3c14e89a18d157
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+from datetime import (
+ date,
+ datetime,
+ time,
+ timedelta,
+)
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Union,
+)
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+import pandas as pd
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from python_calamine import (
+ CalamineSheet,
+ CalamineWorkbook,
+ )
+
+ from pandas._typing import (
+ FilePath,
+ NaTType,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ )
+
+_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]
+
+
+class CalamineReader(BaseExcelReader["CalamineWorkbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using calamine engine (xlsx/xls/xlsb/ods).
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path to be parsed or
+ an open readable stream.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("python_calamine")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[CalamineWorkbook]:
+ from python_calamine import CalamineWorkbook
+
+ return CalamineWorkbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any
+ ) -> CalamineWorkbook:
+ from python_calamine import load_workbook
+
+ return load_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self) -> list[str]:
+ from python_calamine import SheetTypeEnum
+
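+ # Only expose real worksheets; chart sheets and other sheet types
+ # are filtered out.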
+ return [
+ sheet.name
+ for sheet in self.book.sheets_metadata
+ if sheet.typ == SheetTypeEnum.WorkSheet
+ ]
+
+ def get_sheet_by_name(self, name: str) -> CalamineSheet:
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.get_sheet_by_name(name)
+
+ def get_sheet_by_index(self, index: int) -> CalamineSheet:
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.get_sheet_by_index(index)
+
+ def get_sheet_data(
+ self, sheet: CalamineSheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar | NaTType | time]]:
+ def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:
+ if isinstance(value, float):
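+ # A float that is a whole number is returned as an int; other floats
+ # are passed through unchanged.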
+ val = int(value)
+ if val == value:
+ return val
+ else:
+ return value
+ elif isinstance(value, date):
+ return pd.Timestamp(value)
+ elif isinstance(value, timedelta):
+ return pd.Timedelta(value)
+ elif isinstance(value, time):
+ return value
+
+ return value
+
+ rows: list[list[_CellValue]] = sheet.to_python(
+ skip_empty_area=False, nrows=file_rows_needed
+ )
+ data = [[_convert_cell(cell) for cell in row] for row in rows]
+
+ return data
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..69b514da32857119f048a25f647d1002315a9889
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
@@ -0,0 +1,253 @@
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ cast,
+)
+
+import numpy as np
+
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+import pandas as pd
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from odf.opendocument import OpenDocument
+
+ from pandas._libs.tslibs.nattype import NaTType
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+class ODFReader(BaseExcelReader["OpenDocument"]):
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Read tables out of OpenDocument formatted files.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path to be parsed or
+ an open readable stream.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("odf")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[OpenDocument]:
+ from odf.opendocument import OpenDocument
+
+ return OpenDocument
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> OpenDocument:
+ from odf.opendocument import load
+
+ return load(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def empty_value(self) -> str:
+ """Property for compat with other readers."""
+ return ""
+
+ @property
+ def sheet_names(self) -> list[str]:
+ """Return a list of sheet names present in the document"""
+ from odf.table import Table
+
+ tables = self.book.getElementsByType(Table)
+ return [t.getAttribute("name") for t in tables]
+
+ def get_sheet_by_index(self, index: int):
+ from odf.table import Table
+
+ self.raise_if_bad_sheet_by_index(index)
+ tables = self.book.getElementsByType(Table)
+ return tables[index]
+
+ def get_sheet_by_name(self, name: str):
+ from odf.table import Table
+
+ self.raise_if_bad_sheet_by_name(name)
+ tables = self.book.getElementsByType(Table)
+
+ for table in tables:
+ if table.getAttribute("name") == name:
+ return table
+
+ self.close()
+ raise ValueError(f"sheet {name} not found")
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar | NaTType]]:
+ """
+ Parse an ODF Table into a list of lists
+ """
+ from odf.table import (
+ CoveredTableCell,
+ TableCell,
+ TableRow,
+ )
+
+ covered_cell_name = CoveredTableCell().qname
+ table_cell_name = TableCell().qname
+ cell_names = {covered_cell_name, table_cell_name}
+
+ sheet_rows = sheet.getElementsByType(TableRow)
+ empty_rows = 0
+ max_row_len = 0
+
+ table: list[list[Scalar | NaTType]] = []
+
+ for sheet_row in sheet_rows:
+ sheet_cells = [
+ x
+ for x in sheet_row.childNodes
+ if hasattr(x, "qname") and x.qname in cell_names
+ ]
+ empty_cells = 0
+ table_row: list[Scalar | NaTType] = []
+
+ for sheet_cell in sheet_cells:
+ if sheet_cell.qname == table_cell_name:
+ value = self._get_cell_value(sheet_cell)
+ else:
+ value = self.empty_value
+
+ column_repeat = self._get_column_repeat(sheet_cell)
+
+ # Queue up empty values, writing only if content succeeds them
+ if value == self.empty_value:
+ empty_cells += column_repeat
+ else:
+ table_row.extend([self.empty_value] * empty_cells)
+ empty_cells = 0
+ table_row.extend([value] * column_repeat)
+
+ if max_row_len < len(table_row):
+ max_row_len = len(table_row)
+
+ row_repeat = self._get_row_repeat(sheet_row)
+ if len(table_row) == 0:
+ empty_rows += row_repeat
+ else:
+ # add blank rows to our table
+ table.extend([[self.empty_value]] * empty_rows)
+ empty_rows = 0
+ table.extend(table_row for _ in range(row_repeat))
+ if file_rows_needed is not None and len(table) >= file_rows_needed:
+ break
+
+ # Make our table square
+ for row in table:
+ if len(row) < max_row_len:
+ row.extend([self.empty_value] * (max_row_len - len(row)))
+
+ return table
+
+ def _get_row_repeat(self, row) -> int:
+ """
+ Return number of times this row was repeated
+ Repeating an empty row appeared to be a common way
+ of representing sparse rows in the table.
+ """
+ from odf.namespaces import TABLENS
+
+ return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))
+
+ def _get_column_repeat(self, cell) -> int:
+ from odf.namespaces import TABLENS
+
+ return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
+
+ def _get_cell_value(self, cell) -> Scalar | NaTType:
+ from odf.namespaces import OFFICENS
+
+ if str(cell) == "#N/A":
+ return np.nan
+
+ cell_type = cell.attributes.get((OFFICENS, "value-type"))
+ if cell_type == "boolean":
+ if str(cell) == "TRUE":
+ return True
+ return False
+ if cell_type is None:
+ return self.empty_value
+ elif cell_type == "float":
+ # GH5394
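+ # Whole-number floats are returned as int so integer-valued cells
+ # keep an integer type.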
+ cell_value = float(cell.attributes.get((OFFICENS, "value")))
+ val = int(cell_value)
+ if val == cell_value:
+ return val
+ return cell_value
+ elif cell_type == "percentage":
+ cell_value = cell.attributes.get((OFFICENS, "value"))
+ return float(cell_value)
+ elif cell_type == "string":
+ return self._get_cell_string_value(cell)
+ elif cell_type == "currency":
+ cell_value = cell.attributes.get((OFFICENS, "value"))
+ return float(cell_value)
+ elif cell_type == "date":
+ cell_value = cell.attributes.get((OFFICENS, "date-value"))
+ return pd.Timestamp(cell_value)
+ elif cell_type == "time":
+ stamp = pd.Timestamp(str(cell))
+ # cast needed here because Scalar doesn't include datetime.time
+ return cast(Scalar, stamp.time())
+ else:
+ self.close()
+ raise ValueError(f"Unrecognized type {cell_type}")
+
+ def _get_cell_string_value(self, cell) -> str:
+ """
+ Find and decode OpenDocument text:s tags that represent
+ a run length encoded sequence of space characters.
+ """
+ from odf.element import Element
+ from odf.namespaces import TEXTNS
+ from odf.office import Annotation
+ from odf.text import S
+
+ office_annotation = Annotation().qname
+ text_s = S().qname
+
+ value = []
+
+ for fragment in cell.childNodes:
+ if isinstance(fragment, Element):
+ if fragment.qname == text_s:
+ spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
+ value.append(" " * spaces)
+ elif fragment.qname == office_annotation:
+ continue
+ else:
+ # recursive impl needed in case of nested fragments
+ # with multiple spaces
+ # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
+ value.append(self._get_cell_string_value(fragment))
+ else:
+ value.append(str(fragment).strip("\n"))
+ return "".join(value)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc7dca2d95b6b434279f8290fdf929e737f75459
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
@@ -0,0 +1,357 @@
+from __future__ import annotations
+
+from collections import defaultdict
+import datetime
+import json
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ DefaultDict,
+ cast,
+ overload,
+)
+
+from pandas.io.excel._base import ExcelWriter
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+ from pandas.io.formats.excel import ExcelCell
+
+
+class ODSWriter(ExcelWriter):
+ _engine = "odf"
+ _supported_extensions = (".ods",)
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format=None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ from odf.opendocument import OpenDocumentSpreadsheet
+
+ if mode == "a":
+ raise ValueError("Append mode is not supported with odf!")
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+ self._book = OpenDocumentSpreadsheet(**engine_kwargs)
+
+ super().__init__(
+ path,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+ self._style_dict: dict[str, str] = {}
+
+ @property
+ def book(self):
+ """
+ Book instance of class odf.opendocument.OpenDocumentSpreadsheet.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ from odf.table import Table
+
+ result = {
+ sheet.getAttribute("name"): sheet
+ for sheet in self.book.getElementsByType(Table)
+ }
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ for sheet in self.sheets.values():
+ self.book.spreadsheet.addElement(sheet)
+ self.book.save(self._handles.handle)
+
+ def _write_cells(
+ self,
+ cells: list[ExcelCell],
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ """
+ Write the frame cells using odf
+ """
+ from odf.table import (
+ Table,
+ TableCell,
+ TableRow,
+ )
+ from odf.text import P
+
+ sheet_name = self._get_sheet_name(sheet_name)
+ assert sheet_name is not None
+
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = Table(name=sheet_name)
+ self.book.spreadsheet.addElement(wks)
+
+ if validate_freeze_panes(freeze_panes):
+ freeze_panes = cast(tuple[int, int], freeze_panes)
+ self._create_freeze_panes(sheet_name, freeze_panes)
+
+ for _ in range(startrow):
+ wks.addElement(TableRow())
+
+ rows: DefaultDict = defaultdict(TableRow)
+ col_count: DefaultDict = defaultdict(int)
+
+ for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
+ # only add empty cells if the row is still empty
+ if not col_count[cell.row]:
+ for _ in range(startcol):
+ rows[cell.row].addElement(TableCell())
+
+ # fill with empty cells if needed
+ for _ in range(cell.col - col_count[cell.row]):
+ rows[cell.row].addElement(TableCell())
+ col_count[cell.row] += 1
+
+ pvalue, tc = self._make_table_cell(cell)
+ rows[cell.row].addElement(tc)
+ col_count[cell.row] += 1
+ p = P(text=pvalue)
+ tc.addElement(p)
+
+ # add all rows to the sheet
+ if len(rows) > 0:
+ for row_nr in range(max(rows.keys()) + 1):
+ wks.addElement(rows[row_nr])
+
+ def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
+ """Convert cell attributes to OpenDocument attributes
+
+ Parameters
+ ----------
+ cell : ExcelCell
+ Spreadsheet cell data
+
+ Returns
+ -------
+ attributes : Dict[str, Union[int, str]]
+ Dictionary with attributes and attribute values
+ """
+ attributes: dict[str, int | str] = {}
+ style_name = self._process_style(cell.style)
+ if style_name is not None:
+ attributes["stylename"] = style_name
+ if cell.mergestart is not None and cell.mergeend is not None:
+ attributes["numberrowsspanned"] = max(1, cell.mergestart)
+ attributes["numbercolumnsspanned"] = cell.mergeend
+ return attributes
+
+ def _make_table_cell(self, cell) -> tuple[object, Any]:
+ """Convert cell data to an OpenDocument spreadsheet cell
+
+ Parameters
+ ----------
+ cell : ExcelCell
+ Spreadsheet cell data
+
+ Returns
+ -------
+ pvalue, cell : Tuple[str, TableCell]
+ Display value, Cell value
+ """
+ from odf.table import TableCell
+
+ attributes = self._make_table_cell_attributes(cell)
+ val, fmt = self._value_with_fmt(cell.val)
+ pvalue = value = val
+ if isinstance(val, bool):
+ value = str(val).lower()
+ pvalue = str(val).upper()
+ return (
+ pvalue,
+ TableCell(
+ valuetype="boolean",
+ booleanvalue=value,
+ attributes=attributes,
+ ),
+ )
+ elif isinstance(val, datetime.datetime):
+ # Fast formatting
+ value = val.isoformat()
+ # Slow but locale-dependent
+ pvalue = val.strftime("%c")
+ return (
+ pvalue,
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
+ )
+ elif isinstance(val, datetime.date):
+ # Fast formatting
+ value = f"{val.year}-{val.month:02d}-{val.day:02d}"
+ # Slow but locale-dependent
+ pvalue = val.strftime("%x")
+ return (
+ pvalue,
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
+ )
+ elif isinstance(val, str):
+ return (
+ pvalue,
+ TableCell(
+ valuetype="string",
+ stringvalue=value,
+ attributes=attributes,
+ ),
+ )
+ else:
+ return (
+ pvalue,
+ TableCell(
+ valuetype="float",
+ value=value,
+ attributes=attributes,
+ ),
+ )
+
+ @overload
+ def _process_style(self, style: dict[str, Any]) -> str:
+ ...
+
+ @overload
+ def _process_style(self, style: None) -> None:
+ ...
+
+ def _process_style(self, style: dict[str, Any] | None) -> str | None:
+ """Convert a style dictionary to a OpenDocument style sheet
+
+ Parameters
+ ----------
+ style : Dict
+ Style dictionary
+
+ Returns
+ -------
+ style_key : str
+ Unique style key for later reference in sheet
+ """
+ from odf.style import (
+ ParagraphProperties,
+ Style,
+ TableCellProperties,
+ TextProperties,
+ )
+
+ if style is None:
+ return None
+ style_key = json.dumps(style)
+ if style_key in self._style_dict:
+ return self._style_dict[style_key]
+ name = f"pd{len(self._style_dict)+1}"
+ self._style_dict[style_key] = name
+ odf_style = Style(name=name, family="table-cell")
+ if "font" in style:
+ font = style["font"]
+ if font.get("bold", False):
+ odf_style.addElement(TextProperties(fontweight="bold"))
+ if "borders" in style:
+ borders = style["borders"]
+ for side, thickness in borders.items():
+ thickness_translation = {"thin": "0.75pt solid #000000"}
+ odf_style.addElement(
+ TableCellProperties(
+ attributes={f"border{side}": thickness_translation[thickness]}
+ )
+ )
+ if "alignment" in style:
+ alignment = style["alignment"]
+ horizontal = alignment.get("horizontal")
+ if horizontal:
+ odf_style.addElement(ParagraphProperties(textalign=horizontal))
+ vertical = alignment.get("vertical")
+ if vertical:
+ odf_style.addElement(TableCellProperties(verticalalign=vertical))
+ self.book.styles.addElement(odf_style)
+ return name
+
+ def _create_freeze_panes(
+ self, sheet_name: str, freeze_panes: tuple[int, int]
+ ) -> None:
+ """
+ Create freeze panes in the sheet.
+
+ Parameters
+ ----------
+ sheet_name : str
+ Name of the spreadsheet
+ freeze_panes : tuple of (int, int)
+ Freeze pane location x and y
+ """
+ from odf.config import (
+ ConfigItem,
+ ConfigItemMapEntry,
+ ConfigItemMapIndexed,
+ ConfigItemMapNamed,
+ ConfigItemSet,
+ )
+
+ config_item_set = ConfigItemSet(name="ooo:view-settings")
+ self.book.settings.addElement(config_item_set)
+
+ config_item_map_indexed = ConfigItemMapIndexed(name="Views")
+ config_item_set.addElement(config_item_map_indexed)
+
+ config_item_map_entry = ConfigItemMapEntry()
+ config_item_map_indexed.addElement(config_item_map_entry)
+
+ config_item_map_named = ConfigItemMapNamed(name="Tables")
+ config_item_map_entry.addElement(config_item_map_named)
+
+ config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
+ config_item_map_named.addElement(config_item_map_entry)
+
+ config_item_map_entry.addElement(
+ ConfigItem(name="HorizontalSplitMode", type="short", text="2")
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="VerticalSplitMode", type="short", text="2")
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(
+ name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
+ )
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(
+ name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
+ )
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
+ )
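A short, hedged sketch of ODSWriter in use via the public ExcelWriter API; the output filename is illustrative:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

# engine="odf" selects ODSWriter; freeze_panes is forwarded to
# _create_freeze_panes, which emits the ooo:view-settings config items above.
with pd.ExcelWriter("report.ods", engine="odf") as writer:
    df.to_excel(writer, sheet_name="Sheet1", freeze_panes=(1, 1))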
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c546443868a62aed062bf3fd41d80933e4fbc59e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
@@ -0,0 +1,639 @@
+from __future__ import annotations
+
+import mmap
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ cast,
+)
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import (
+ BaseExcelReader,
+ ExcelWriter,
+)
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from openpyxl import Workbook
+ from openpyxl.descriptors.serialisable import Serialisable
+
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+
+class OpenpyxlWriter(ExcelWriter):
+ _engine = "openpyxl"
+ _supported_extensions = (".xlsx", ".xlsm")
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ # Use the openpyxl module as the Excel writer.
+ from openpyxl.workbook import Workbook
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+
+ super().__init__(
+ path,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+        # ExcelWriter replaces "a" with "r+" so that we can first read the existing
+        # workbook from the file and later write back to it
+ if "r+" in self._mode: # Load from existing workbook
+ from openpyxl import load_workbook
+
+ try:
+ self._book = load_workbook(self._handles.handle, **engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+ self._handles.handle.seek(0)
+ else:
+ # Create workbook object with default optimized_write=True.
+ try:
+ self._book = Workbook(**engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+
+ if self.book.worksheets:
+ self.book.remove(self.book.worksheets[0])
+
+ @property
+ def book(self) -> Workbook:
+ """
+ Book instance of class openpyxl.workbook.Workbook.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ result = {name: self.book[name] for name in self.book.sheetnames}
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ self.book.save(self._handles.handle)
+ if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
+ # truncate file to the written content
+ self._handles.handle.truncate()
+
+ @classmethod
+ def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
+ """
+ Convert a style_dict to a set of kwargs suitable for initializing
+ or updating-on-copy an openpyxl v2 style object.
+
+ Parameters
+ ----------
+ style_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'font'
+ 'fill'
+ 'border' ('borders')
+ 'alignment'
+ 'number_format'
+ 'protection'
+
+ Returns
+ -------
+ style_kwargs : dict
+ A dict with the same, normalized keys as ``style_dict`` but each
+ value has been replaced with a native openpyxl style object of the
+ appropriate class.
+ """
+ _style_key_map = {"borders": "border"}
+
+ style_kwargs: dict[str, Serialisable] = {}
+ for k, v in style_dict.items():
+ k = _style_key_map.get(k, k)
+ _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
+ new_v = _conv_to_x(v)
+ if new_v:
+ style_kwargs[k] = new_v
+
+ return style_kwargs
+
+ @classmethod
+ def _convert_to_color(cls, color_spec):
+ """
+ Convert ``color_spec`` to an openpyxl v2 Color object.
+
+ Parameters
+ ----------
+ color_spec : str, dict
+ A 32-bit ARGB hex string, or a dict with zero or more of the
+ following keys.
+ 'rgb'
+ 'indexed'
+ 'auto'
+ 'theme'
+ 'tint'
+ 'index'
+ 'type'
+
+ Returns
+ -------
+ color : openpyxl.styles.Color
+ """
+ from openpyxl.styles import Color
+
+ if isinstance(color_spec, str):
+ return Color(color_spec)
+ else:
+ return Color(**color_spec)
+
+ @classmethod
+ def _convert_to_font(cls, font_dict):
+ """
+ Convert ``font_dict`` to an openpyxl v2 Font object.
+
+ Parameters
+ ----------
+ font_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'name'
+ 'size' ('sz')
+ 'bold' ('b')
+ 'italic' ('i')
+ 'underline' ('u')
+ 'strikethrough' ('strike')
+ 'color'
+ 'vertAlign' ('vertalign')
+ 'charset'
+ 'scheme'
+ 'family'
+ 'outline'
+ 'shadow'
+ 'condense'
+
+ Returns
+ -------
+ font : openpyxl.styles.Font
+ """
+ from openpyxl.styles import Font
+
+ _font_key_map = {
+ "sz": "size",
+ "b": "bold",
+ "i": "italic",
+ "u": "underline",
+ "strike": "strikethrough",
+ "vertalign": "vertAlign",
+ }
+
+ font_kwargs = {}
+ for k, v in font_dict.items():
+ k = _font_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ font_kwargs[k] = v
+
+ return Font(**font_kwargs)
+
+ @classmethod
+ def _convert_to_stop(cls, stop_seq):
+ """
+ Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
+ suitable for initializing the ``GradientFill`` ``stop`` parameter.
+
+ Parameters
+ ----------
+ stop_seq : iterable
+ An iterable that yields objects suitable for consumption by
+ ``_convert_to_color``.
+
+ Returns
+ -------
+ stop : list of openpyxl.styles.Color
+ """
+ return map(cls._convert_to_color, stop_seq)
+
+ @classmethod
+ def _convert_to_fill(cls, fill_dict: dict[str, Any]):
+ """
+ Convert ``fill_dict`` to an openpyxl v2 Fill object.
+
+ Parameters
+ ----------
+ fill_dict : dict
+ A dict with one or more of the following keys (or their synonyms),
+ 'fill_type' ('patternType', 'patterntype')
+ 'start_color' ('fgColor', 'fgcolor')
+ 'end_color' ('bgColor', 'bgcolor')
+ or one or more of the following keys (or their synonyms).
+ 'type' ('fill_type')
+ 'degree'
+ 'left'
+ 'right'
+ 'top'
+ 'bottom'
+ 'stop'
+
+ Returns
+ -------
+ fill : openpyxl.styles.Fill
+ """
+ from openpyxl.styles import (
+ GradientFill,
+ PatternFill,
+ )
+
+ _pattern_fill_key_map = {
+ "patternType": "fill_type",
+ "patterntype": "fill_type",
+ "fgColor": "start_color",
+ "fgcolor": "start_color",
+ "bgColor": "end_color",
+ "bgcolor": "end_color",
+ }
+
+ _gradient_fill_key_map = {"fill_type": "type"}
+
+ pfill_kwargs = {}
+ gfill_kwargs = {}
+ for k, v in fill_dict.items():
+ pk = _pattern_fill_key_map.get(k)
+ gk = _gradient_fill_key_map.get(k)
+ if pk in ["start_color", "end_color"]:
+ v = cls._convert_to_color(v)
+ if gk == "stop":
+ v = cls._convert_to_stop(v)
+ if pk:
+ pfill_kwargs[pk] = v
+ elif gk:
+ gfill_kwargs[gk] = v
+ else:
+ pfill_kwargs[k] = v
+ gfill_kwargs[k] = v
+
+ try:
+ return PatternFill(**pfill_kwargs)
+ except TypeError:
+ return GradientFill(**gfill_kwargs)
+
+ @classmethod
+ def _convert_to_side(cls, side_spec):
+ """
+ Convert ``side_spec`` to an openpyxl v2 Side object.
+
+ Parameters
+ ----------
+ side_spec : str, dict
+ A string specifying the border style, or a dict with zero or more
+ of the following keys (or their synonyms).
+ 'style' ('border_style')
+ 'color'
+
+ Returns
+ -------
+ side : openpyxl.styles.Side
+ """
+ from openpyxl.styles import Side
+
+ _side_key_map = {"border_style": "style"}
+
+ if isinstance(side_spec, str):
+ return Side(style=side_spec)
+
+ side_kwargs = {}
+ for k, v in side_spec.items():
+ k = _side_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ side_kwargs[k] = v
+
+ return Side(**side_kwargs)
+
+ @classmethod
+ def _convert_to_border(cls, border_dict):
+ """
+ Convert ``border_dict`` to an openpyxl v2 Border object.
+
+ Parameters
+ ----------
+ border_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'left'
+ 'right'
+ 'top'
+ 'bottom'
+ 'diagonal'
+ 'diagonal_direction'
+ 'vertical'
+ 'horizontal'
+ 'diagonalUp' ('diagonalup')
+ 'diagonalDown' ('diagonaldown')
+ 'outline'
+
+ Returns
+ -------
+ border : openpyxl.styles.Border
+ """
+ from openpyxl.styles import Border
+
+ _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
+
+ border_kwargs = {}
+ for k, v in border_dict.items():
+ k = _border_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ if k in ["left", "right", "top", "bottom", "diagonal"]:
+ v = cls._convert_to_side(v)
+ border_kwargs[k] = v
+
+ return Border(**border_kwargs)
+
+ @classmethod
+ def _convert_to_alignment(cls, alignment_dict):
+ """
+ Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
+
+ Parameters
+ ----------
+ alignment_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'horizontal'
+ 'vertical'
+ 'text_rotation'
+ 'wrap_text'
+ 'shrink_to_fit'
+ 'indent'
+ Returns
+ -------
+ alignment : openpyxl.styles.Alignment
+ """
+ from openpyxl.styles import Alignment
+
+ return Alignment(**alignment_dict)
+
+ @classmethod
+ def _convert_to_number_format(cls, number_format_dict):
+ """
+ Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
+ initializer.
+
+ Parameters
+ ----------
+ number_format_dict : dict
+ A dict with zero or more of the following keys.
+ 'format_code' : str
+
+ Returns
+ -------
+ number_format : str
+ """
+ return number_format_dict["format_code"]
+
+ @classmethod
+ def _convert_to_protection(cls, protection_dict):
+ """
+ Convert ``protection_dict`` to an openpyxl v2 Protection object.
+
+ Parameters
+ ----------
+ protection_dict : dict
+ A dict with zero or more of the following keys.
+ 'locked'
+ 'hidden'
+
+ Returns
+ -------
+ """
+ from openpyxl.styles import Protection
+
+ return Protection(**protection_dict)
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ # Write the frame cells using openpyxl.
+ sheet_name = self._get_sheet_name(sheet_name)
+
+ _style_cache: dict[str, dict[str, Serialisable]] = {}
+
+ if sheet_name in self.sheets and self._if_sheet_exists != "new":
+ if "r+" in self._mode:
+ if self._if_sheet_exists == "replace":
+ old_wks = self.sheets[sheet_name]
+ target_index = self.book.index(old_wks)
+ del self.book[sheet_name]
+ wks = self.book.create_sheet(sheet_name, target_index)
+ elif self._if_sheet_exists == "error":
+ raise ValueError(
+ f"Sheet '{sheet_name}' already exists and "
+ f"if_sheet_exists is set to 'error'."
+ )
+ elif self._if_sheet_exists == "overlay":
+ wks = self.sheets[sheet_name]
+ else:
+ raise ValueError(
+ f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
+ )
+ else:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.create_sheet()
+ wks.title = sheet_name
+
+ if validate_freeze_panes(freeze_panes):
+ freeze_panes = cast(tuple[int, int], freeze_panes)
+ wks.freeze_panes = wks.cell(
+ row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
+ )
+
+ for cell in cells:
+ xcell = wks.cell(
+ row=startrow + cell.row + 1, column=startcol + cell.col + 1
+ )
+ xcell.value, fmt = self._value_with_fmt(cell.val)
+ if fmt:
+ xcell.number_format = fmt
+
+ style_kwargs: dict[str, Serialisable] | None = {}
+ if cell.style:
+ key = str(cell.style)
+ style_kwargs = _style_cache.get(key)
+ if style_kwargs is None:
+ style_kwargs = self._convert_to_style_kwargs(cell.style)
+ _style_cache[key] = style_kwargs
+
+ if style_kwargs:
+ for k, v in style_kwargs.items():
+ setattr(xcell, k, v)
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.merge_cells(
+ start_row=startrow + cell.row + 1,
+ start_column=startcol + cell.col + 1,
+ end_column=startcol + cell.mergeend + 1,
+ end_row=startrow + cell.mergestart + 1,
+ )
+
+ # When cells are merged only the top-left cell is preserved
+ # The behaviour of the other cells in a merged range is
+ # undefined
+ if style_kwargs:
+ first_row = startrow + cell.row + 1
+ last_row = startrow + cell.mergestart + 1
+ first_col = startcol + cell.col + 1
+ last_col = startcol + cell.mergeend + 1
+
+ for row in range(first_row, last_row + 1):
+ for col in range(first_col, last_col + 1):
+ if row == first_row and col == first_col:
+ # Ignore first cell. It is already handled.
+ continue
+ xcell = wks.cell(column=col, row=row)
+ for k, v in style_kwargs.items():
+ setattr(xcell, k, v)
+
+
+class OpenpyxlReader(BaseExcelReader["Workbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using openpyxl engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("openpyxl")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Workbook]:
+ from openpyxl import Workbook
+
+ return Workbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> Workbook:
+ from openpyxl import load_workbook
+
+ default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}
+
+ return load_workbook(
+ filepath_or_buffer,
+ **(default_kwargs | engine_kwargs),
+ )
+
+ @property
+ def sheet_names(self) -> list[str]:
+ return [sheet.title for sheet in self.book.worksheets]
+
+ def get_sheet_by_name(self, name: str):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book[name]
+
+ def get_sheet_by_index(self, index: int):
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.worksheets[index]
+
+ def _convert_cell(self, cell) -> Scalar:
+ from openpyxl.cell.cell import (
+ TYPE_ERROR,
+ TYPE_NUMERIC,
+ )
+
+ if cell.value is None:
+ return "" # compat with xlrd
+ elif cell.data_type == TYPE_ERROR:
+ return np.nan
+ elif cell.data_type == TYPE_NUMERIC:
+ val = int(cell.value)
+ if val == cell.value:
+ return val
+ return float(cell.value)
+
+ return cell.value
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
+ if self.book.read_only:
+ sheet.reset_dimensions()
+
+ data: list[list[Scalar]] = []
+ last_row_with_data = -1
+ for row_number, row in enumerate(sheet.rows):
+ converted_row = [self._convert_cell(cell) for cell in row]
+ while converted_row and converted_row[-1] == "":
+ # trim trailing empty elements
+ converted_row.pop()
+ if converted_row:
+ last_row_with_data = row_number
+ data.append(converted_row)
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
+
+ # Trim trailing empty rows
+ data = data[: last_row_with_data + 1]
+
+ if len(data) > 0:
+ # extend rows to max width
+ max_width = max(len(data_row) for data_row in data)
+ if min(len(data_row) for data_row in data) < max_width:
+ empty_cell: list[Scalar] = [""]
+ data = [
+ data_row + (max_width - len(data_row)) * empty_cell
+ for data_row in data
+ ]
+
+ return data
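To make the style conversion above concrete, a sketch that calls the private _convert_to_style_kwargs helper directly (illustration only; requires openpyxl, and the exact objects depend on the installed version):

from pandas.io.excel._openpyxl import OpenpyxlWriter

style = {
    "font": {"bold": True, "color": "FF000000"},
    "borders": {"top": {"style": "thin"}},
    "alignment": {"horizontal": "center"},
}

# "borders" is normalized to "border"; each value becomes a native openpyxl
# object (Font, Border, Alignment) via the matching _convert_to_* classmethod.
kwargs = OpenpyxlWriter._convert_to_style_kwargs(style)
print(sorted(kwargs))  # typically ['alignment', 'border', 'font']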
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6e42616c20438fa4cab16e94b5d16a01c9c61df
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
@@ -0,0 +1,127 @@
+# pyright: reportMissingImports=false
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from pyxlsb import Workbook
+
+ from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ )
+
+
+class PyxlsbReader(BaseExcelReader["Workbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using pyxlsb engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("pyxlsb")
+ # This will call load_workbook on the filepath or buffer
+ # And set the result to the book-attribute
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Workbook]:
+ from pyxlsb import Workbook
+
+ return Workbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> Workbook:
+ from pyxlsb import open_workbook
+
+ # TODO: hack in buffer capability
+ # This might need some modifications to the Pyxlsb library
+ # Actual work for opening it is in xlsbpackage.py, line 20-ish
+
+ return open_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self) -> list[str]:
+ return self.book.sheets
+
+ def get_sheet_by_name(self, name: str):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.get_sheet(name)
+
+ def get_sheet_by_index(self, index: int):
+ self.raise_if_bad_sheet_by_index(index)
+ # pyxlsb sheets are indexed from 1 onwards
+ # There's a fix for this in the source, but the pypi package doesn't have it
+ return self.book.get_sheet(index + 1)
+
+ def _convert_cell(self, cell) -> Scalar:
+ # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
+ # This means that there is no way to read datetime types from an xlsb file yet
+ if cell.v is None:
+ return "" # Prevents non-named columns from not showing up as Unnamed: i
+ if isinstance(cell.v, float):
+ val = int(cell.v)
+ if val == cell.v:
+ return val
+ else:
+ return float(cell.v)
+
+ return cell.v
+
+ def get_sheet_data(
+ self,
+ sheet,
+ file_rows_needed: int | None = None,
+ ) -> list[list[Scalar]]:
+ data: list[list[Scalar]] = []
+ previous_row_number = -1
+ # When sparse=True the rows can have different lengths and empty rows are
+ # not returned. The cells are namedtuples of row, col, value (r, c, v).
+ for row in sheet.rows(sparse=True):
+ row_number = row[0].r
+ converted_row = [self._convert_cell(cell) for cell in row]
+ while converted_row and converted_row[-1] == "":
+ # trim trailing empty elements
+ converted_row.pop()
+ if converted_row:
+ data.extend([[]] * (row_number - previous_row_number - 1))
+ data.append(converted_row)
+ previous_row_number = row_number
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
+ if data:
+ # extend rows to max_width
+ max_width = max(len(data_row) for data_row in data)
+ if min(len(data_row) for data_row in data) < max_width:
+ empty_cell: list[Scalar] = [""]
+ data = [
+ data_row + (max_width - len(data_row)) * empty_cell
+ for data_row in data
+ ]
+ return data
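A brief usage sketch for the pyxlsb reader; "workbook.xlsb" is a placeholder and the pyxlsb package must be installed:

import pandas as pd

# engine="pyxlsb" selects PyxlsbReader; per the note in _convert_cell above,
# xlsb stores datetimes as floats, so date columns come back as numbers.
df = pd.read_excel("workbook.xlsb", engine="pyxlsb", sheet_name=0)
print(df.dtypes)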
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_util.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7a1fcb8052e391d0853be64866663f4e6de9d08
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_util.py
@@ -0,0 +1,334 @@
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ MutableMapping,
+ Sequence,
+)
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Literal,
+ TypeVar,
+ overload,
+)
+
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.dtypes.common import (
+ is_integer,
+ is_list_like,
+)
+
+if TYPE_CHECKING:
+ from pandas.io.excel._base import ExcelWriter
+
+ ExcelWriter_t = type[ExcelWriter]
+ usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])
+
+_writers: MutableMapping[str, ExcelWriter_t] = {}
+
+
+def register_writer(klass: ExcelWriter_t) -> None:
+ """
+    Add engine to the excel writer registry (``io.excel``).
+
+ You must use this method to integrate with ``to_excel``.
+
+ Parameters
+ ----------
+ klass : ExcelWriter
+ """
+ if not callable(klass):
+ raise ValueError("Can only register callables as engines")
+ engine_name = klass._engine
+ _writers[engine_name] = klass
+
+
+def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
+ """
+ Return the default reader/writer for the given extension.
+
+ Parameters
+ ----------
+ ext : str
+ The excel file extension for which to get the default engine.
+ mode : str {'reader', 'writer'}
+ Whether to get the default engine for reading or writing.
+ Either 'reader' or 'writer'
+
+ Returns
+ -------
+ str
+ The default engine for the extension.
+ """
+ _default_readers = {
+ "xlsx": "openpyxl",
+ "xlsm": "openpyxl",
+ "xlsb": "pyxlsb",
+ "xls": "xlrd",
+ "ods": "odf",
+ }
+ _default_writers = {
+ "xlsx": "openpyxl",
+ "xlsm": "openpyxl",
+ "xlsb": "pyxlsb",
+ "ods": "odf",
+ }
+ assert mode in ["reader", "writer"]
+ if mode == "writer":
+ # Prefer xlsxwriter over openpyxl if installed
+ xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
+ if xlsxwriter:
+ _default_writers["xlsx"] = "xlsxwriter"
+ return _default_writers[ext]
+ else:
+ return _default_readers[ext]
+
+
+def get_writer(engine_name: str) -> ExcelWriter_t:
+ try:
+ return _writers[engine_name]
+ except KeyError as err:
+ raise ValueError(f"No Excel writer '{engine_name}'") from err
+
+
+def _excel2num(x: str) -> int:
+ """
+ Convert Excel column name like 'AB' to 0-based column index.
+
+ Parameters
+ ----------
+ x : str
+ The Excel column name to convert to a 0-based column index.
+
+ Returns
+ -------
+ num : int
+ The column index corresponding to the name.
+
+ Raises
+ ------
+ ValueError
+ Part of the Excel column name was invalid.
+ """
+ index = 0
+
+ for c in x.upper().strip():
+ cp = ord(c)
+
+ if cp < ord("A") or cp > ord("Z"):
+ raise ValueError(f"Invalid column name: {x}")
+
+ index = index * 26 + cp - ord("A") + 1
+
+ return index - 1
+
+
+def _range2cols(areas: str) -> list[int]:
+ """
+ Convert comma separated list of column names and ranges to indices.
+
+ Parameters
+ ----------
+ areas : str
+ A string containing a sequence of column ranges (or areas).
+
+ Returns
+ -------
+ cols : list
+ A list of 0-based column indices.
+
+ Examples
+ --------
+ >>> _range2cols('A:E')
+ [0, 1, 2, 3, 4]
+ >>> _range2cols('A,C,Z:AB')
+ [0, 2, 25, 26, 27]
+ """
+ cols: list[int] = []
+
+ for rng in areas.split(","):
+ if ":" in rng:
+ rngs = rng.split(":")
+ cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
+ else:
+ cols.append(_excel2num(rng))
+
+ return cols
+
+
+@overload
+def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: list[str]) -> list[str]:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: None) -> None:
+ ...
+
+
+def maybe_convert_usecols(
+ usecols: str | list[int] | list[str] | usecols_func | None,
+) -> None | list[int] | list[str] | usecols_func:
+ """
+ Convert `usecols` into a compatible format for parsing in `parsers.py`.
+
+ Parameters
+ ----------
+ usecols : object
+ The use-columns object to potentially convert.
+
+ Returns
+ -------
+ converted : object
+ The compatible format of `usecols`.
+ """
+ if usecols is None:
+ return usecols
+
+ if is_integer(usecols):
+ raise ValueError(
+ "Passing an integer for `usecols` is no longer supported. "
+ "Please pass in a list of int from 0 to `usecols` inclusive instead."
+ )
+
+ if isinstance(usecols, str):
+ return _range2cols(usecols)
+
+ return usecols
+
+
+@overload
+def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
+ ...
+
+
+@overload
+def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
+ ...
+
+
+def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
+ if freeze_panes is not None:
+ if len(freeze_panes) == 2 and all(
+ isinstance(item, int) for item in freeze_panes
+ ):
+ return True
+
+ raise ValueError(
+ "freeze_panes must be of form (row, column) "
+ "where row and column are integers"
+ )
+
+ # freeze_panes wasn't specified, return False so it won't be applied
+ # to output sheet
+ return False
+
+
+def fill_mi_header(
+ row: list[Hashable], control_row: list[bool]
+) -> tuple[list[Hashable], list[bool]]:
+ """
+ Forward fill blank entries in row but only inside the same parent index.
+
+ Used for creating headers in Multiindex.
+
+ Parameters
+ ----------
+ row : list
+ List of items in a single row.
+ control_row : list of bool
+ Helps to determine if particular column is in same parent index as the
+ previous value. Used to stop propagation of empty cells between
+ different indexes.
+
+ Returns
+ -------
+ Returns changed row and control_row
+ """
+ last = row[0]
+ for i in range(1, len(row)):
+ if not control_row[i]:
+ last = row[i]
+
+ if row[i] == "" or row[i] is None:
+ row[i] = last
+ else:
+ control_row[i] = False
+ last = row[i]
+
+ return row, control_row
+
+
+def pop_header_name(
+ row: list[Hashable], index_col: int | Sequence[int]
+) -> tuple[Hashable | None, list[Hashable]]:
+ """
+ Pop the header name for MultiIndex parsing.
+
+ Parameters
+ ----------
+ row : list
+ The data row to parse for the header name.
+ index_col : int, list
+ The index columns for our data. Assumed to be non-null.
+
+ Returns
+ -------
+ header_name : str
+ The extracted header name.
+ trimmed_row : list
+ The original data row with the header name removed.
+ """
+ # Pop out header name and fill w/blank.
+ if is_list_like(index_col):
+ assert isinstance(index_col, Iterable)
+ i = max(index_col)
+ else:
+ assert not isinstance(index_col, Iterable)
+ i = index_col
+
+ header_name = row[i]
+ header_name = None if header_name == "" else header_name
+
+ return header_name, row[:i] + [""] + row[i + 1 :]
+
+
+def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
+ """
+ Used to combine two sources of kwargs for the backend engine.
+
+ Use of kwargs is deprecated, this function is solely for use in 1.3 and should
+ be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
+ or kwargs must be None or empty respectively.
+
+ Parameters
+ ----------
+ engine_kwargs: dict
+ kwargs to be passed through to the engine.
+ kwargs: dict
+        kwargs to be passed through to the engine (deprecated)
+
+ Returns
+ -------
+ engine_kwargs combined with kwargs
+ """
+ if engine_kwargs is None:
+ result = {}
+ else:
+ result = engine_kwargs.copy()
+ result.update(kwargs)
+ return result
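Worked examples for the column helpers above (private helpers, shown only to make the arithmetic concrete; 'AB' is 1*26 + 2 = 28 one-based, i.e. index 27):

from pandas.io.excel._util import _excel2num, _range2cols, maybe_convert_usecols

assert _excel2num("A") == 0
assert _excel2num("AB") == 27                      # (1 * 26 + 2) - 1
assert _range2cols("A:E") == [0, 1, 2, 3, 4]
assert _range2cols("A,C,Z:AB") == [0, 2, 25, 26, 27]

# For a string spec, maybe_convert_usecols simply defers to _range2cols.
assert maybe_convert_usecols("A,C,Z:AB") == [0, 2, 25, 26, 27]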
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
new file mode 100644
index 0000000000000000000000000000000000000000..a444970792e6e65faf3d8947b721fff59487d994
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
@@ -0,0 +1,143 @@
+from __future__ import annotations
+
+from datetime import time
+import math
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from xlrd import Book
+
+ from pandas._typing import (
+ Scalar,
+ StorageOptions,
+ )
+
+
+class XlrdReader(BaseExcelReader["Book"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using xlrd engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
+ import_optional_dependency("xlrd", extra=err_msg)
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Book]:
+ from xlrd import Book
+
+ return Book
+
+ def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
+ from xlrd import open_workbook
+
+ if hasattr(filepath_or_buffer, "read"):
+ data = filepath_or_buffer.read()
+ return open_workbook(file_contents=data, **engine_kwargs)
+ else:
+ return open_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self):
+ return self.book.sheet_names()
+
+ def get_sheet_by_name(self, name):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.sheet_by_name(name)
+
+ def get_sheet_by_index(self, index):
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.sheet_by_index(index)
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
+ from xlrd import (
+ XL_CELL_BOOLEAN,
+ XL_CELL_DATE,
+ XL_CELL_ERROR,
+ XL_CELL_NUMBER,
+ xldate,
+ )
+
+ epoch1904 = self.book.datemode
+
+ def _parse_cell(cell_contents, cell_typ):
+ """
+            Convert the contents of the cell into a pandas-appropriate object.
+ """
+ if cell_typ == XL_CELL_DATE:
+ # Use the newer xlrd datetime handling.
+ try:
+ cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
+ except OverflowError:
+ return cell_contents
+
+ # Excel doesn't distinguish between dates and time,
+ # so we treat dates on the epoch as times only.
+ # Also, Excel supports 1900 and 1904 epochs.
+ year = (cell_contents.timetuple())[0:3]
+ if (not epoch1904 and year == (1899, 12, 31)) or (
+ epoch1904 and year == (1904, 1, 1)
+ ):
+ cell_contents = time(
+ cell_contents.hour,
+ cell_contents.minute,
+ cell_contents.second,
+ cell_contents.microsecond,
+ )
+
+ elif cell_typ == XL_CELL_ERROR:
+ cell_contents = np.nan
+ elif cell_typ == XL_CELL_BOOLEAN:
+ cell_contents = bool(cell_contents)
+ elif cell_typ == XL_CELL_NUMBER:
+ # GH5394 - Excel 'numbers' are always floats
+ # it's a minimal perf hit and less surprising
+ if math.isfinite(cell_contents):
+ # GH54564 - don't attempt to convert NaN/Inf
+ val = int(cell_contents)
+ if val == cell_contents:
+ cell_contents = val
+ return cell_contents
+
+ data = []
+
+ nrows = sheet.nrows
+ if file_rows_needed is not None:
+ nrows = min(nrows, file_rows_needed)
+ for i in range(nrows):
+ row = [
+ _parse_cell(value, typ)
+ for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
+ ]
+ data.append(row)
+
+ return data
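A minimal read sketch for the xlrd path; "legacy.xls" is a placeholder, and xlrd >= 2.0.1 only handles the old .xls format:

import pandas as pd

# Date cells that fall on the workbook epoch (1899-12-31 or 1904-01-01) are
# returned as datetime.time objects by _parse_cell above.
df = pd.read_excel("legacy.xls", engine="xlrd", sheet_name=0)
print(df.head())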
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eacac8c064fb1f297cd46b8ab0361ceb22067b4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
@@ -0,0 +1,284 @@
+from __future__ import annotations
+
+import json
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+
+from pandas.io.excel._base import ExcelWriter
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+
+class _XlsxStyler:
+ # Map from openpyxl-oriented styles to flatter xlsxwriter representation
+ # Ordering necessary for both determinism and because some are keyed by
+ # prefixes of others.
+ STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
+ "font": [
+ (("name",), "font_name"),
+ (("sz",), "font_size"),
+ (("size",), "font_size"),
+ (("color", "rgb"), "font_color"),
+ (("color",), "font_color"),
+ (("b",), "bold"),
+ (("bold",), "bold"),
+ (("i",), "italic"),
+ (("italic",), "italic"),
+ (("u",), "underline"),
+ (("underline",), "underline"),
+ (("strike",), "font_strikeout"),
+ (("vertAlign",), "font_script"),
+ (("vertalign",), "font_script"),
+ ],
+ "number_format": [(("format_code",), "num_format"), ((), "num_format")],
+ "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
+ "alignment": [
+ (("horizontal",), "align"),
+ (("vertical",), "valign"),
+ (("text_rotation",), "rotation"),
+ (("wrap_text",), "text_wrap"),
+ (("indent",), "indent"),
+ (("shrink_to_fit",), "shrink"),
+ ],
+ "fill": [
+ (("patternType",), "pattern"),
+ (("patterntype",), "pattern"),
+ (("fill_type",), "pattern"),
+ (("start_color", "rgb"), "fg_color"),
+ (("fgColor", "rgb"), "fg_color"),
+ (("fgcolor", "rgb"), "fg_color"),
+ (("start_color",), "fg_color"),
+ (("fgColor",), "fg_color"),
+ (("fgcolor",), "fg_color"),
+ (("end_color", "rgb"), "bg_color"),
+ (("bgColor", "rgb"), "bg_color"),
+ (("bgcolor", "rgb"), "bg_color"),
+ (("end_color",), "bg_color"),
+ (("bgColor",), "bg_color"),
+ (("bgcolor",), "bg_color"),
+ ],
+ "border": [
+ (("color", "rgb"), "border_color"),
+ (("color",), "border_color"),
+ (("style",), "border"),
+ (("top", "color", "rgb"), "top_color"),
+ (("top", "color"), "top_color"),
+ (("top", "style"), "top"),
+ (("top",), "top"),
+ (("right", "color", "rgb"), "right_color"),
+ (("right", "color"), "right_color"),
+ (("right", "style"), "right"),
+ (("right",), "right"),
+ (("bottom", "color", "rgb"), "bottom_color"),
+ (("bottom", "color"), "bottom_color"),
+ (("bottom", "style"), "bottom"),
+ (("bottom",), "bottom"),
+ (("left", "color", "rgb"), "left_color"),
+ (("left", "color"), "left_color"),
+ (("left", "style"), "left"),
+ (("left",), "left"),
+ ],
+ }
+
+ @classmethod
+ def convert(cls, style_dict, num_format_str=None):
+ """
+ converts a style_dict to an xlsxwriter format dict
+
+ Parameters
+ ----------
+ style_dict : style dictionary to convert
+ num_format_str : optional number format string
+ """
+ # Create a XlsxWriter format object.
+ props = {}
+
+ if num_format_str is not None:
+ props["num_format"] = num_format_str
+
+ if style_dict is None:
+ return props
+
+ if "borders" in style_dict:
+ style_dict = style_dict.copy()
+ style_dict["border"] = style_dict.pop("borders")
+
+ for style_group_key, style_group in style_dict.items():
+ for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
+ # src is a sequence of keys into a nested dict
+ # dst is a flat key
+ if dst in props:
+ continue
+ v = style_group
+ for k in src:
+ try:
+ v = v[k]
+ except (KeyError, TypeError):
+ break
+ else:
+ props[dst] = v
+
+ if isinstance(props.get("pattern"), str):
+ # TODO: support other fill patterns
+ props["pattern"] = 0 if props["pattern"] == "none" else 1
+
+ for k in ["border", "top", "right", "bottom", "left"]:
+ if isinstance(props.get(k), str):
+ try:
+ props[k] = [
+ "none",
+ "thin",
+ "medium",
+ "dashed",
+ "dotted",
+ "thick",
+ "double",
+ "hair",
+ "mediumDashed",
+ "dashDot",
+ "mediumDashDot",
+ "dashDotDot",
+ "mediumDashDotDot",
+ "slantDashDot",
+ ].index(props[k])
+ except ValueError:
+ props[k] = 2
+
+ if isinstance(props.get("font_script"), str):
+ props["font_script"] = ["baseline", "superscript", "subscript"].index(
+ props["font_script"]
+ )
+
+ if isinstance(props.get("underline"), str):
+ props["underline"] = {
+ "none": 0,
+ "single": 1,
+ "double": 2,
+ "singleAccounting": 33,
+ "doubleAccounting": 34,
+ }[props["underline"]]
+
+ # GH 30107 - xlsxwriter uses different name
+ if props.get("valign") == "center":
+ props["valign"] = "vcenter"
+
+ return props
+
+
+class XlsxWriter(ExcelWriter):
+ _engine = "xlsxwriter"
+ _supported_extensions = (".xlsx",)
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ # Use the xlsxwriter module as the Excel writer.
+ from xlsxwriter import Workbook
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+
+ if mode == "a":
+ raise ValueError("Append mode is not supported with xlsxwriter!")
+
+ super().__init__(
+ path,
+ engine=engine,
+ date_format=date_format,
+ datetime_format=datetime_format,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+ try:
+ self._book = Workbook(self._handles.handle, **engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+
+ @property
+ def book(self):
+ """
+ Book instance of class xlsxwriter.Workbook.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ result = self.book.sheetnames
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ self.book.close()
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ # Write the frame cells using xlsxwriter.
+ sheet_name = self._get_sheet_name(sheet_name)
+
+ wks = self.book.get_worksheet_by_name(sheet_name)
+ if wks is None:
+ wks = self.book.add_worksheet(sheet_name)
+
+ style_dict = {"null": None}
+
+ if validate_freeze_panes(freeze_panes):
+ wks.freeze_panes(*(freeze_panes))
+
+ for cell in cells:
+ val, fmt = self._value_with_fmt(cell.val)
+
+ stylekey = json.dumps(cell.style)
+ if fmt:
+ stylekey += fmt
+
+ if stylekey in style_dict:
+ style = style_dict[stylekey]
+ else:
+ style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
+ style_dict[stylekey] = style
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.merge_range(
+ startrow + cell.row,
+ startcol + cell.col,
+ startrow + cell.mergestart,
+ startcol + cell.mergeend,
+ val,
+ style,
+ )
+ else:
+ wks.write(startrow + cell.row, startcol + cell.col, val, style)
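To illustrate how _XlsxStyler flattens an openpyxl-style dict into xlsxwriter format properties (private helper, illustration only; the output follows STYLE_MAPPING above):

from pandas.io.excel._xlsxwriter import _XlsxStyler

props = _XlsxStyler.convert(
    {"font": {"bold": True, "color": "FF0000"}, "borders": {"top": "thin"}},
    num_format_str="0.00",
)
# Expected: {'num_format': '0.00', 'font_color': 'FF0000', 'bold': True, 'top': 1}
# ('thin' maps to index 1 in the border-style list above.)
print(props)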
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/gbq.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/gbq.py
new file mode 100644
index 0000000000000000000000000000000000000000..350002bf461ff91f477371c1570e8cbf2ee090bb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/gbq.py
@@ -0,0 +1,255 @@
+""" Google BigQuery support """
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+import warnings
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._exceptions import find_stack_level
+
+if TYPE_CHECKING:
+ import google.auth
+
+ from pandas import DataFrame
+
+
+def _try_import():
+ # since pandas is a dependency of pandas-gbq
+ # we need to import on first use
+ msg = (
+ "pandas-gbq is required to load data from Google BigQuery. "
+ "See the docs: https://pandas-gbq.readthedocs.io."
+ )
+ pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg)
+ return pandas_gbq
+
+
+def read_gbq(
+ query: str,
+ project_id: str | None = None,
+ index_col: str | None = None,
+ col_order: list[str] | None = None,
+ reauth: bool = False,
+ auth_local_webserver: bool = True,
+ dialect: str | None = None,
+ location: str | None = None,
+ configuration: dict[str, Any] | None = None,
+ credentials: google.auth.credentials.Credentials | None = None,
+ use_bqstorage_api: bool | None = None,
+ max_results: int | None = None,
+ progress_bar_type: str | None = None,
+) -> DataFrame:
+ """
+ Load data from Google BigQuery.
+
+ .. deprecated:: 2.2.0
+
+ Please use ``pandas_gbq.read_gbq`` instead.
+
+ This function requires the `pandas-gbq package
+    <https://pandas-gbq.readthedocs.io>`__.
+
+ See the `How to authenticate with Google BigQuery
+ `__
+ guide for authentication instructions.
+
+ Parameters
+ ----------
+ query : str
+ SQL-Like Query to return data values.
+ project_id : str, optional
+ Google BigQuery Account project ID. Optional when available from
+ the environment.
+ index_col : str, optional
+ Name of result column to use for index in results DataFrame.
+ col_order : list(str), optional
+ List of BigQuery column names in the desired order for results
+ DataFrame.
+ reauth : bool, default False
+ Force Google BigQuery to re-authenticate the user. This is useful
+ if multiple accounts are used.
+ auth_local_webserver : bool, default True
+ Use the `local webserver flow`_ instead of the `console flow`_
+ when getting user credentials.
+
+ .. _local webserver flow:
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
+ .. _console flow:
+ https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
+
+ *New in version 0.2.0 of pandas-gbq*.
+
+ .. versionchanged:: 1.5.0
+ Default value is changed to ``True``. Google has deprecated the
+ ``auth_local_webserver = False`` `"out of band" (copy-paste)
+ flow
+ `_.
+ dialect : str, default 'legacy'
+ Note: The default value is changing to 'standard' in a future version.
+
+ SQL syntax dialect to use. Value can be one of:
+
+ ``'legacy'``
+ Use BigQuery's legacy SQL dialect. For more information see
+ `BigQuery Legacy SQL Reference
+ `__.
+ ``'standard'``
+ Use BigQuery's standard SQL, which is
+ compliant with the SQL 2011 standard. For more information
+ see `BigQuery Standard SQL Reference
+ `__.
+ location : str, optional
+ Location where the query job should run. See the `BigQuery locations
+ documentation
+ `__ for a
+ list of available locations. The location must match that of any
+ datasets used in the query.
+
+ *New in version 0.5.0 of pandas-gbq*.
+ configuration : dict, optional
+ Query config parameters for job processing.
+ For example:
+
+ configuration = {'query': {'useQueryCache': False}}
+
+ For more information see `BigQuery REST API Reference
+ `__.
+ credentials : google.auth.credentials.Credentials, optional
+ Credentials for accessing Google APIs. Use this parameter to override
+ default credentials, such as to use Compute Engine
+ :class:`google.auth.compute_engine.Credentials` or Service Account
+ :class:`google.oauth2.service_account.Credentials` directly.
+
+ *New in version 0.8.0 of pandas-gbq*.
+ use_bqstorage_api : bool, default False
+ Use the `BigQuery Storage API
+ `__ to
+ download query results quickly, but at an increased cost. To use this
+ API, first `enable it in the Cloud Console
+ `__.
+ You must also have the `bigquery.readsessions.create
+ `__
+ permission on the project you are billing queries to.
+
+ This feature requires version 0.10.0 or later of the ``pandas-gbq``
+ package. It also requires the ``google-cloud-bigquery-storage`` and
+ ``fastavro`` packages.
+
+ max_results : int, optional
+ If set, limit the maximum number of rows to fetch from the query
+ results.
+
+    progress_bar_type : str, optional
+ If set, use the `tqdm `__ library to
+ display a progress bar while the data downloads. Install the
+ ``tqdm`` package to use this feature.
+
+ Possible values of ``progress_bar_type`` include:
+
+ ``None``
+ No progress bar.
+ ``'tqdm'``
+ Use the :func:`tqdm.tqdm` function to print a progress bar
+ to :data:`sys.stderr`.
+ ``'tqdm_notebook'``
+ Use the :func:`tqdm.tqdm_notebook` function to display a
+ progress bar as a Jupyter notebook widget.
+ ``'tqdm_gui'``
+ Use the :func:`tqdm.tqdm_gui` function to display a
+ progress bar as a graphical dialog box.
+
+ Returns
+ -------
+ df: DataFrame
+ DataFrame representing results of query.
+
+ See Also
+ --------
+ pandas_gbq.read_gbq : This function in the pandas-gbq library.
+ DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
+
+ Examples
+ --------
+ Example taken from `Google BigQuery documentation
+ `_
+
+ >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;"
+ >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP
+ >>> project_id = "your-project-id" # doctest: +SKIP
+ >>> df = pd.read_gbq(sql,
+ ... project_id=project_id,
+ ... dialect="standard"
+ ... ) # doctest: +SKIP
+ """
+ warnings.warn(
+ "read_gbq is deprecated and will be removed in a future version. "
+ "Please use pandas_gbq.read_gbq instead: "
+ "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.read_gbq",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ pandas_gbq = _try_import()
+
+ kwargs: dict[str, str | bool | int | None] = {}
+
+ # START: new kwargs. Don't populate unless explicitly set.
+ if use_bqstorage_api is not None:
+ kwargs["use_bqstorage_api"] = use_bqstorage_api
+ if max_results is not None:
+ kwargs["max_results"] = max_results
+
+ kwargs["progress_bar_type"] = progress_bar_type
+ # END: new kwargs
+
+ return pandas_gbq.read_gbq(
+ query,
+ project_id=project_id,
+ index_col=index_col,
+ col_order=col_order,
+ reauth=reauth,
+ auth_local_webserver=auth_local_webserver,
+ dialect=dialect,
+ location=location,
+ configuration=configuration,
+ credentials=credentials,
+ **kwargs,
+ )
+
+
+def to_gbq(
+ dataframe: DataFrame,
+ destination_table: str,
+ project_id: str | None = None,
+ chunksize: int | None = None,
+ reauth: bool = False,
+ if_exists: str = "fail",
+ auth_local_webserver: bool = True,
+ table_schema: list[dict[str, str]] | None = None,
+ location: str | None = None,
+ progress_bar: bool = True,
+ credentials: google.auth.credentials.Credentials | None = None,
+) -> None:
+ warnings.warn(
+ "to_gbq is deprecated and will be removed in a future version. "
+ "Please use pandas_gbq.to_gbq instead: "
+ "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ pandas_gbq = _try_import()
+ pandas_gbq.to_gbq(
+ dataframe,
+ destination_table,
+ project_id=project_id,
+ chunksize=chunksize,
+ reauth=reauth,
+ if_exists=if_exists,
+ auth_local_webserver=auth_local_webserver,
+ table_schema=table_schema,
+ location=location,
+ progress_bar=progress_bar,
+ credentials=credentials,
+ )
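Both wrappers above emit FutureWarning and point at pandas-gbq; a hedged equivalent using the query, project id and dialect from the read_gbq docstring (all placeholders):

import pandas_gbq

# Direct replacement for the deprecated pd.read_gbq wrapper above.
sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;"
df = pandas_gbq.read_gbq(sql, project_id="your-project-id", dialect="standard")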
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f4e7a62834b57c151189cdd2994a55d1ad9f7de
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__init__.py
@@ -0,0 +1,15 @@
+from pandas.io.json._json import (
+ read_json,
+ to_json,
+ ujson_dumps,
+ ujson_loads,
+)
+from pandas.io.json._table_schema import build_table_schema
+
+__all__ = [
+ "ujson_dumps",
+ "ujson_loads",
+ "read_json",
+ "to_json",
+ "build_table_schema",
+]
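A small sketch of the helpers re-exported here (the schema layout shown in the comment is indicative, not exhaustive):

import pandas as pd
from pandas.io.json import build_table_schema, ujson_dumps, ujson_loads

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

schema = build_table_schema(df)   # dict with 'fields', 'primaryKey', 'pandas_version'
payload = ujson_dumps({"rows": len(df)})
assert ujson_loads(payload) == {"rows": 2}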
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69f37e48802b021b74213ebac5360d84bbe85e04
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70e3255e098d4e5663b4ad6ab40c2d387ca0427e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6afa40be149a854f3d621cf7c14b6b1a98df5028
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72b5280055a7d585285034ce93a2d32f29a721ae
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_json.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..9414f4521502999881238cf0b6e0dcb5c7ab1ad5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_json.py
@@ -0,0 +1,1505 @@
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+from collections import abc
+from io import StringIO
+from itertools import islice
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Generic,
+ Literal,
+ TypeVar,
+ final,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+from pandas._libs.json import (
+ ujson_dumps,
+ ujson_loads,
+)
+from pandas._libs.tslibs import iNaT
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ ensure_str,
+ is_string_dtype,
+)
+from pandas.core.dtypes.dtypes import PeriodDtype
+
+from pandas import (
+ ArrowDtype,
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+ isna,
+ notna,
+ to_datetime,
+)
+from pandas.core.reshape.concat import concat
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+ IOHandles,
+ dedup_names,
+ extension_to_compression,
+ file_exists,
+ get_handle,
+ is_fsspec_url,
+ is_potential_multi_index,
+ is_url,
+ stringify_path,
+)
+from pandas.io.json._normalize import convert_to_line_delimits
+from pandas.io.json._table_schema import (
+ build_table_schema,
+ parse_table_schema,
+)
+from pandas.io.parsers.readers import validate_integer
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Mapping,
+ )
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ DtypeArg,
+ DtypeBackend,
+ FilePath,
+ IndexLabel,
+ JSONEngine,
+ JSONSerializable,
+ ReadBuffer,
+ Self,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas.core.generic import NDFrame
+
+FrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"])
+
+
+# interface to/from
+@overload
+def to_json(
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes],
+ obj: NDFrame,
+ orient: str | None = ...,
+ date_format: str = ...,
+ double_precision: int = ...,
+ force_ascii: bool = ...,
+ date_unit: str = ...,
+ default_handler: Callable[[Any], JSONSerializable] | None = ...,
+ lines: bool = ...,
+ compression: CompressionOptions = ...,
+ index: bool | None = ...,
+ indent: int = ...,
+ storage_options: StorageOptions = ...,
+ mode: Literal["a", "w"] = ...,
+) -> None:
+ ...
+
+
+@overload
+def to_json(
+ path_or_buf: None,
+ obj: NDFrame,
+ orient: str | None = ...,
+ date_format: str = ...,
+ double_precision: int = ...,
+ force_ascii: bool = ...,
+ date_unit: str = ...,
+ default_handler: Callable[[Any], JSONSerializable] | None = ...,
+ lines: bool = ...,
+ compression: CompressionOptions = ...,
+ index: bool | None = ...,
+ indent: int = ...,
+ storage_options: StorageOptions = ...,
+ mode: Literal["a", "w"] = ...,
+) -> str:
+ ...
+
+
+def to_json(
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None,
+ obj: NDFrame,
+ orient: str | None = None,
+ date_format: str = "epoch",
+ double_precision: int = 10,
+ force_ascii: bool = True,
+ date_unit: str = "ms",
+ default_handler: Callable[[Any], JSONSerializable] | None = None,
+ lines: bool = False,
+ compression: CompressionOptions = "infer",
+ index: bool | None = None,
+ indent: int = 0,
+ storage_options: StorageOptions | None = None,
+ mode: Literal["a", "w"] = "w",
+) -> str | None:
+ if orient in ["records", "values"] and index is True:
+ raise ValueError(
+ "'index=True' is only valid when 'orient' is 'split', 'table', "
+ "'index', or 'columns'."
+ )
+ elif orient in ["index", "columns"] and index is False:
+ raise ValueError(
+ "'index=False' is only valid when 'orient' is 'split', 'table', "
+ "'records', or 'values'."
+ )
+ elif index is None:
+ # will be ignored for orient='records' and 'values'
+ index = True
+
+ if lines and orient != "records":
+ raise ValueError("'lines' keyword only valid when 'orient' is records")
+
+ if mode not in ["a", "w"]:
+ msg = (
+ f"mode={mode} is not a valid option."
+ "Only 'w' and 'a' are currently supported."
+ )
+ raise ValueError(msg)
+
+ if mode == "a" and (not lines or orient != "records"):
+ msg = (
+ "mode='a' (append) is only supported when "
+ "lines is True and orient is 'records'"
+ )
+ raise ValueError(msg)
+
+ if orient == "table" and isinstance(obj, Series):
+ obj = obj.to_frame(name=obj.name or "values")
+
+ writer: type[Writer]
+ if orient == "table" and isinstance(obj, DataFrame):
+ writer = JSONTableWriter
+ elif isinstance(obj, Series):
+ writer = SeriesWriter
+ elif isinstance(obj, DataFrame):
+ writer = FrameWriter
+ else:
+ raise NotImplementedError("'obj' should be a Series or a DataFrame")
+
+ s = writer(
+ obj,
+ orient=orient,
+ date_format=date_format,
+ double_precision=double_precision,
+ ensure_ascii=force_ascii,
+ date_unit=date_unit,
+ default_handler=default_handler,
+ index=index,
+ indent=indent,
+ ).write()
+
+ if lines:
+ s = convert_to_line_delimits(s)
+
+ if path_or_buf is not None:
+ # apply compression and byte/text conversion
+ with get_handle(
+ path_or_buf, mode, compression=compression, storage_options=storage_options
+ ) as handles:
+ handles.handle.write(s)
+ else:
+ return s
+ return None
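
The validation above is normally reached through ``DataFrame.to_json`` / ``Series.to_json``; a minimal sketch of the rules it enforces (``df`` is an arbitrary example frame):

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})

df.to_json(orient="records", lines=True)      # OK: newline-delimited records
try:
    df.to_json(orient="split", lines=True)    # lines=True requires orient='records'
except ValueError as err:
    print(err)
try:
    df.to_json(orient="records", index=True)  # index=True is invalid for 'records'/'values'
except ValueError as err:
    print(err)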
+
+
+class Writer(ABC):
+ _default_orient: str
+
+ def __init__(
+ self,
+ obj: NDFrame,
+ orient: str | None,
+ date_format: str,
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ index: bool,
+ default_handler: Callable[[Any], JSONSerializable] | None = None,
+ indent: int = 0,
+ ) -> None:
+ self.obj = obj
+
+ if orient is None:
+ orient = self._default_orient
+
+ self.orient = orient
+ self.date_format = date_format
+ self.double_precision = double_precision
+ self.ensure_ascii = ensure_ascii
+ self.date_unit = date_unit
+ self.default_handler = default_handler
+ self.index = index
+ self.indent = indent
+
+ self.is_copy = None
+ self._format_axes()
+
+ def _format_axes(self) -> None:
+ raise AbstractMethodError(self)
+
+ def write(self) -> str:
+ iso_dates = self.date_format == "iso"
+ return ujson_dumps(
+ self.obj_to_write,
+ orient=self.orient,
+ double_precision=self.double_precision,
+ ensure_ascii=self.ensure_ascii,
+ date_unit=self.date_unit,
+ iso_dates=iso_dates,
+ default_handler=self.default_handler,
+ indent=self.indent,
+ )
+
+ @property
+ @abstractmethod
+ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
+ """Object to write in JSON format."""
+
+
+class SeriesWriter(Writer):
+ _default_orient = "index"
+
+ @property
+ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
+ if not self.index and self.orient == "split":
+ return {"name": self.obj.name, "data": self.obj.values}
+ else:
+ return self.obj
+
+ def _format_axes(self) -> None:
+ if not self.obj.index.is_unique and self.orient == "index":
+ raise ValueError(f"Series index must be unique for orient='{self.orient}'")
+
+
+class FrameWriter(Writer):
+ _default_orient = "columns"
+
+ @property
+ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
+ if not self.index and self.orient == "split":
+ obj_to_write = self.obj.to_dict(orient="split")
+ del obj_to_write["index"]
+ else:
+ obj_to_write = self.obj
+ return obj_to_write
+
+ def _format_axes(self) -> None:
+ """
+ Validate that the axes are unique where the chosen orient requires it.
+ """
+ if not self.obj.index.is_unique and self.orient in ("index", "columns"):
+ raise ValueError(
+ f"DataFrame index must be unique for orient='{self.orient}'."
+ )
+ if not self.obj.columns.is_unique and self.orient in (
+ "index",
+ "columns",
+ "records",
+ ):
+ raise ValueError(
+ f"DataFrame columns must be unique for orient='{self.orient}'."
+ )
+
+
+class JSONTableWriter(FrameWriter):
+ _default_orient = "records"
+
+ def __init__(
+ self,
+ obj,
+ orient: str | None,
+ date_format: str,
+ double_precision: int,
+ ensure_ascii: bool,
+ date_unit: str,
+ index: bool,
+ default_handler: Callable[[Any], JSONSerializable] | None = None,
+ indent: int = 0,
+ ) -> None:
+ """
+ Adds a `schema` attribute with the Table Schema, resets
+ the index (this can't be done in the caller, because the schema
+ inference needs to know what the index is), forces orient to
+ 'records', and forces date_format to 'iso'.
+ """
+ super().__init__(
+ obj,
+ orient,
+ date_format,
+ double_precision,
+ ensure_ascii,
+ date_unit,
+ index,
+ default_handler=default_handler,
+ indent=indent,
+ )
+
+ if date_format != "iso":
+ msg = (
+ "Trying to write with `orient='table'` and "
+ f"`date_format='{date_format}'`. Table Schema requires dates "
+ "to be formatted with `date_format='iso'`"
+ )
+ raise ValueError(msg)
+
+ self.schema = build_table_schema(obj, index=self.index)
+
+ # NotImplemented on a column MultiIndex
+ if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
+ raise NotImplementedError(
+ "orient='table' is not supported for MultiIndex columns"
+ )
+
+ # TODO: Do this timedelta properly in objToJSON.c See GH #15137
+ if (
+ (obj.ndim == 1)
+ and (obj.name in set(obj.index.names))
+ or len(obj.columns.intersection(obj.index.names))
+ ):
+ msg = "Overlapping names between the index and columns"
+ raise ValueError(msg)
+
+ obj = obj.copy()
+ timedeltas = obj.select_dtypes(include=["timedelta"]).columns
+ if len(timedeltas):
+ obj[timedeltas] = obj[timedeltas].map(lambda x: x.isoformat())
+ # Convert PeriodIndex to datetimes before serializing
+ if isinstance(obj.index.dtype, PeriodDtype):
+ obj.index = obj.index.to_timestamp()
+
+ # exclude index from obj if index=False
+ if not self.index:
+ self.obj = obj.reset_index(drop=True)
+ else:
+ self.obj = obj.reset_index(drop=False)
+ self.date_format = "iso"
+ self.orient = "records"
+ self.index = index
+
+ @property
+ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
+ return {"schema": self.schema, "data": self.obj}
+
+
+@overload
+def read_json(
+ path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+ *,
+ orient: str | None = ...,
+ typ: Literal["frame"] = ...,
+ dtype: DtypeArg | None = ...,
+ convert_axes: bool | None = ...,
+ convert_dates: bool | list[str] = ...,
+ keep_default_dates: bool = ...,
+ precise_float: bool = ...,
+ date_unit: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ lines: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ nrows: int | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ engine: JSONEngine = ...,
+) -> JsonReader[Literal["frame"]]:
+ ...
+
+
+@overload
+def read_json(
+ path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+ *,
+ orient: str | None = ...,
+ typ: Literal["series"],
+ dtype: DtypeArg | None = ...,
+ convert_axes: bool | None = ...,
+ convert_dates: bool | list[str] = ...,
+ keep_default_dates: bool = ...,
+ precise_float: bool = ...,
+ date_unit: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ lines: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ nrows: int | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ engine: JSONEngine = ...,
+) -> JsonReader[Literal["series"]]:
+ ...
+
+
+@overload
+def read_json(
+ path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+ *,
+ orient: str | None = ...,
+ typ: Literal["series"],
+ dtype: DtypeArg | None = ...,
+ convert_axes: bool | None = ...,
+ convert_dates: bool | list[str] = ...,
+ keep_default_dates: bool = ...,
+ precise_float: bool = ...,
+ date_unit: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ lines: bool = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ nrows: int | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ engine: JSONEngine = ...,
+) -> Series:
+ ...
+
+
+@overload
+def read_json(
+ path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+ *,
+ orient: str | None = ...,
+ typ: Literal["frame"] = ...,
+ dtype: DtypeArg | None = ...,
+ convert_axes: bool | None = ...,
+ convert_dates: bool | list[str] = ...,
+ keep_default_dates: bool = ...,
+ precise_float: bool = ...,
+ date_unit: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ lines: bool = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ nrows: int | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ engine: JSONEngine = ...,
+) -> DataFrame:
+ ...
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"] % "path_or_buf",
+)
+def read_json(
+ path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+ *,
+ orient: str | None = None,
+ typ: Literal["frame", "series"] = "frame",
+ dtype: DtypeArg | None = None,
+ convert_axes: bool | None = None,
+ convert_dates: bool | list[str] = True,
+ keep_default_dates: bool = True,
+ precise_float: bool = False,
+ date_unit: str | None = None,
+ encoding: str | None = None,
+ encoding_errors: str | None = "strict",
+ lines: bool = False,
+ chunksize: int | None = None,
+ compression: CompressionOptions = "infer",
+ nrows: int | None = None,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ engine: JSONEngine = "ujson",
+) -> DataFrame | Series | JsonReader:
+ """
+ Convert a JSON string to pandas object.
+
+ Parameters
+ ----------
+ path_or_buf : a valid JSON str, path object or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.json``.
+
+ If you want to pass in a path object, pandas accepts any
+ ``os.PathLike``.
+
+ By file-like object, we refer to objects with a ``read()`` method,
+ such as a file handle (e.g. via builtin ``open`` function)
+ or ``StringIO``.
+
+ .. deprecated:: 2.1.0
+ Passing json literal strings is deprecated.
+
+ orient : str, optional
+ Indication of expected JSON string format.
+ Compatible JSON strings can be produced by ``to_json()`` with a
+ corresponding orient value.
+ The set of possible orients is:
+
+ - ``'split'`` : dict like
+ ``{{index -> [index], columns -> [columns], data -> [values]}}``
+ - ``'records'`` : list like
+ ``[{{column -> value}}, ... , {{column -> value}}]``
+ - ``'index'`` : dict like ``{{index -> {{column -> value}}}}``
+ - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}``
+ - ``'values'`` : just the values array
+ - ``'table'`` : dict like ``{{'schema': {{schema}}, 'data': {{data}}}}``
+
+ The allowed and default values depend on the value
+ of the `typ` parameter.
+
+ * when ``typ == 'series'``,
+
+ - allowed orients are ``{{'split','records','index'}}``
+ - default is ``'index'``
+ - The Series index must be unique for orient ``'index'``.
+
+ * when ``typ == 'frame'``,
+
+ - allowed orients are ``{{'split','records','index',
+ 'columns','values', 'table'}}``
+ - default is ``'columns'``
+ - The DataFrame index must be unique for orients ``'index'`` and
+ ``'columns'``.
+ - The DataFrame columns must be unique for orients ``'index'``,
+ ``'columns'``, and ``'records'``.
+
+ typ : {{'frame', 'series'}}, default 'frame'
+ The type of object to recover.
+
+ dtype : bool or dict, default None
+ If True, infer dtypes; if a dict of column to dtype, then use those;
+ if False, then don't infer dtypes at all, applies only to the data.
+
+ For all ``orient`` values except ``'table'``, default is True.
+
+ convert_axes : bool, default None
+ Try to convert the axes to the proper dtypes.
+
+ For all ``orient`` values except ``'table'``, default is True.
+
+ convert_dates : bool or list of str, default True
+ If True then default datelike columns may be converted (depending on
+ keep_default_dates).
+ If False, no dates will be converted.
+ If a list of column names, then those columns will be converted and
+ default datelike columns may also be converted (depending on
+ keep_default_dates).
+
+ keep_default_dates : bool, default True
+ If parsing dates (convert_dates is not False), then try to parse the
+ default datelike columns.
+ A column label is datelike if
+
+ * it ends with ``'_at'``,
+
+ * it ends with ``'_time'``,
+
+ * it begins with ``'timestamp'``,
+
+ * it is ``'modified'``, or
+
+ * it is ``'date'``.
+
+ precise_float : bool, default False
+ Set to enable usage of higher precision (strtod) function when
+ decoding string to double values. Default (False) is to use fast but
+ less precise builtin functionality.
+
+ date_unit : str, default None
+ The timestamp unit to detect if converting dates. The default behaviour
+ is to try and detect the correct precision, but if this is not desired
+ then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
+ milliseconds, microseconds or nanoseconds respectively.
+
+ encoding : str, default is 'utf-8'
+ The encoding to use to decode py3 bytes.
+
+ encoding_errors : str, optional, default "strict"
+ How encoding errors are treated. `List of possible values
+ <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
+
+ .. versionadded:: 1.3.0
+
+ lines : bool, default False
+ Read the file as a json object per line.
+
+ chunksize : int, optional
+ Return JsonReader object for iteration.
+ See the line-delimited json section of the pandas IO user guide
+ for more information on ``chunksize``.
+ This can only be passed if `lines=True`.
+ If this is None, the file will be read into memory all at once.
+ {decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ nrows : int, optional
+ The number of lines to read from the line-delimited JSON file.
+ This can only be passed if `lines=True`.
+ If this is None, all the rows will be returned.
+
+ {storage_options}
+
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ engine : {{"ujson", "pyarrow"}}, default "ujson"
+ Parser engine to use. The ``"pyarrow"`` engine is only available when
+ ``lines=True``.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ Series, DataFrame, or pandas.api.typing.JsonReader
+ A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``.
+ Otherwise, the type returned depends on the value of ``typ``.
+
+ See Also
+ --------
+ DataFrame.to_json : Convert a DataFrame to a JSON string.
+ Series.to_json : Convert a Series to a JSON string.
+ json_normalize : Normalize semi-structured JSON data into a flat table.
+
+ Notes
+ -----
+ Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
+ :class:`Index` name of `index` gets written with :func:`to_json`, the
+ subsequent read operation will incorrectly set the :class:`Index` name to
+ ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
+ to denote a missing :class:`Index` name, and the subsequent
+ :func:`read_json` operation cannot distinguish between the two. The same
+ limitation is encountered with a :class:`MultiIndex` and any names
+ beginning with ``'level_'``.
+
+ Examples
+ --------
+ >>> from io import StringIO
+ >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
+ ... index=['row 1', 'row 2'],
+ ... columns=['col 1', 'col 2'])
+
+ Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
+
+ >>> df.to_json(orient='split')
+ '\
+{{\
+"columns":["col 1","col 2"],\
+"index":["row 1","row 2"],\
+"data":[["a","b"],["c","d"]]\
+}}\
+'
+ >>> pd.read_json(StringIO(_), orient='split')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
+ Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
+
+ >>> df.to_json(orient='index')
+ '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'
+
+ >>> pd.read_json(StringIO(_), orient='index')
+ col 1 col 2
+ row 1 a b
+ row 2 c d
+
+ Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
+ Note that index labels are not preserved with this encoding.
+
+ >>> df.to_json(orient='records')
+ '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'
+ >>> pd.read_json(StringIO(_), orient='records')
+ col 1 col 2
+ 0 a b
+ 1 c d
+
+ Encoding with Table Schema
+
+ >>> df.to_json(orient='table')
+ '\
+{{"schema":{{"fields":[\
+{{"name":"index","type":"string"}},\
+{{"name":"col 1","type":"string"}},\
+{{"name":"col 2","type":"string"}}],\
+"primaryKey":["index"],\
+"pandas_version":"1.4.0"}},\
+"data":[\
+{{"index":"row 1","col 1":"a","col 2":"b"}},\
+{{"index":"row 2","col 1":"c","col 2":"d"}}]\
+}}\
+'
+
+ The following example uses ``dtype_backend="numpy_nullable"``
+
+ >>> data = '''{{"index": {{"0": 0, "1": 1}},
+ ... "a": {{"0": 1, "1": null}},
+ ... "b": {{"0": 2.5, "1": 4.5}},
+ ... "c": {{"0": true, "1": false}},
+ ... "d": {{"0": "a", "1": "b"}},
+ ... "e": {{"0": 1577.2, "1": 1577.1}}}}'''
+ >>> pd.read_json(StringIO(data), dtype_backend="numpy_nullable")
+ index a b c d e
+ 0 0 1 2.5 True a 1577.2
+ 1 1 4.5 False b 1577.1
+ """
+ if orient == "table" and dtype:
+ raise ValueError("cannot pass both dtype and orient='table'")
+ if orient == "table" and convert_axes:
+ raise ValueError("cannot pass both convert_axes and orient='table'")
+
+ check_dtype_backend(dtype_backend)
+
+ if dtype is None and orient != "table":
+ # error: Incompatible types in assignment (expression has type "bool", variable
+ # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float],
+ # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable,
+ # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
+ # Type[int], Type[complex], Type[bool], Type[object]]], None]")
+ dtype = True # type: ignore[assignment]
+ if convert_axes is None and orient != "table":
+ convert_axes = True
+
+ json_reader = JsonReader(
+ path_or_buf,
+ orient=orient,
+ typ=typ,
+ dtype=dtype,
+ convert_axes=convert_axes,
+ convert_dates=convert_dates,
+ keep_default_dates=keep_default_dates,
+ precise_float=precise_float,
+ date_unit=date_unit,
+ encoding=encoding,
+ lines=lines,
+ chunksize=chunksize,
+ compression=compression,
+ nrows=nrows,
+ storage_options=storage_options,
+ encoding_errors=encoding_errors,
+ dtype_backend=dtype_backend,
+ engine=engine,
+ )
+
+ if chunksize:
+ return json_reader
+ else:
+ return json_reader.read()
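
A minimal usage sketch for the reader above; note that literal JSON strings are deprecated, so the text is wrapped in ``StringIO``, and ``typ`` selects the output type:

from io import StringIO

import pandas as pd

frame_json = '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
df = pd.read_json(StringIO(frame_json))                   # DataFrame, orient='columns'

series_json = '{"row 1":10,"row 2":20}'
ser = pd.read_json(StringIO(series_json), typ="series")   # Series, orient='index'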
+
+
+class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]):
+ """
+ JsonReader provides an interface for reading in a JSON file.
+
+ If initialized with ``lines=True`` and ``chunksize``, can be iterated over
+ ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
+ whole document.
+ """
+
+ def __init__(
+ self,
+ filepath_or_buffer,
+ orient,
+ typ: FrameSeriesStrT,
+ dtype,
+ convert_axes: bool | None,
+ convert_dates,
+ keep_default_dates: bool,
+ precise_float: bool,
+ date_unit,
+ encoding,
+ lines: bool,
+ chunksize: int | None,
+ compression: CompressionOptions,
+ nrows: int | None,
+ storage_options: StorageOptions | None = None,
+ encoding_errors: str | None = "strict",
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ engine: JSONEngine = "ujson",
+ ) -> None:
+ self.orient = orient
+ self.typ = typ
+ self.dtype = dtype
+ self.convert_axes = convert_axes
+ self.convert_dates = convert_dates
+ self.keep_default_dates = keep_default_dates
+ self.precise_float = precise_float
+ self.date_unit = date_unit
+ self.encoding = encoding
+ self.engine = engine
+ self.compression = compression
+ self.storage_options = storage_options
+ self.lines = lines
+ self.chunksize = chunksize
+ self.nrows_seen = 0
+ self.nrows = nrows
+ self.encoding_errors = encoding_errors
+ self.handles: IOHandles[str] | None = None
+ self.dtype_backend = dtype_backend
+
+ if self.engine not in {"pyarrow", "ujson"}:
+ raise ValueError(
+ f"The engine type {self.engine} is currently not supported."
+ )
+ if self.chunksize is not None:
+ self.chunksize = validate_integer("chunksize", self.chunksize, 1)
+ if not self.lines:
+ raise ValueError("chunksize can only be passed if lines=True")
+ if self.engine == "pyarrow":
+ raise ValueError(
+ "currently pyarrow engine doesn't support chunksize parameter"
+ )
+ if self.nrows is not None:
+ self.nrows = validate_integer("nrows", self.nrows, 0)
+ if not self.lines:
+ raise ValueError("nrows can only be passed if lines=True")
+ if (
+ isinstance(filepath_or_buffer, str)
+ and not self.lines
+ and "\n" in filepath_or_buffer
+ ):
+ warnings.warn(
+ "Passing literal json to 'read_json' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ if self.engine == "pyarrow":
+ if not self.lines:
+ raise ValueError(
+ "currently pyarrow engine only supports "
+ "the line-delimited JSON format"
+ )
+ self.data = filepath_or_buffer
+ elif self.engine == "ujson":
+ data = self._get_data_from_filepath(filepath_or_buffer)
+ self.data = self._preprocess_data(data)
+
+ def _preprocess_data(self, data):
+ """
+ At this point, the data either has a `read` attribute (e.g. a file
+ object or a StringIO) or is a string that is a JSON document.
+
+ If self.chunksize, we prepare the data for the `__next__` method.
+ Otherwise, we read it into memory for the `read` method.
+ """
+ if hasattr(data, "read") and not (self.chunksize or self.nrows):
+ with self:
+ data = data.read()
+ if not hasattr(data, "read") and (self.chunksize or self.nrows):
+ data = StringIO(data)
+
+ return data
+
+ def _get_data_from_filepath(self, filepath_or_buffer):
+ """
+ The function read_json accepts three input types:
+ 1. filepath (string-like)
+ 2. file-like object (e.g. open file object, StringIO)
+ 3. JSON string
+
+ This method turns (1) into (2) to simplify the rest of the processing.
+ It returns input types (2) and (3) unchanged.
+
+ It raises FileNotFoundError if the input is a string ending in
+ one of .json, .json.gz, .json.bz2, etc. but no such file exists.
+ """
+ # if it is a string but the file does not exist, it might be a JSON string
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if (
+ not isinstance(filepath_or_buffer, str)
+ or is_url(filepath_or_buffer)
+ or is_fsspec_url(filepath_or_buffer)
+ or file_exists(filepath_or_buffer)
+ ):
+ self.handles = get_handle(
+ filepath_or_buffer,
+ "r",
+ encoding=self.encoding,
+ compression=self.compression,
+ storage_options=self.storage_options,
+ errors=self.encoding_errors,
+ )
+ filepath_or_buffer = self.handles.handle
+ elif (
+ isinstance(filepath_or_buffer, str)
+ and filepath_or_buffer.lower().endswith(
+ (".json",) + tuple(f".json{c}" for c in extension_to_compression)
+ )
+ and not file_exists(filepath_or_buffer)
+ ):
+ raise FileNotFoundError(f"File {filepath_or_buffer} does not exist")
+ else:
+ warnings.warn(
+ "Passing literal json to 'read_json' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return filepath_or_buffer
+
+ def _combine_lines(self, lines) -> str:
+ """
+ Combines a list of JSON objects into one JSON object.
+ """
+ return (
+ f'[{",".join([line for line in (line.strip() for line in lines) if line])}]'
+ )
+
+ @overload
+ def read(self: JsonReader[Literal["frame"]]) -> DataFrame:
+ ...
+
+ @overload
+ def read(self: JsonReader[Literal["series"]]) -> Series:
+ ...
+
+ @overload
+ def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
+ ...
+
+ def read(self) -> DataFrame | Series:
+ """
+ Read the whole JSON input into a pandas object.
+ """
+ obj: DataFrame | Series
+ with self:
+ if self.engine == "pyarrow":
+ pyarrow_json = import_optional_dependency("pyarrow.json")
+ pa_table = pyarrow_json.read_json(self.data)
+
+ mapping: type[ArrowDtype] | None | Callable
+ if self.dtype_backend == "pyarrow":
+ mapping = ArrowDtype
+ elif self.dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping().get
+ else:
+ mapping = None
+
+ return pa_table.to_pandas(types_mapper=mapping)
+ elif self.engine == "ujson":
+ if self.lines:
+ if self.chunksize:
+ obj = concat(self)
+ elif self.nrows:
+ lines = list(islice(self.data, self.nrows))
+ lines_json = self._combine_lines(lines)
+ obj = self._get_object_parser(lines_json)
+ else:
+ data = ensure_str(self.data)
+ data_lines = data.split("\n")
+ obj = self._get_object_parser(self._combine_lines(data_lines))
+ else:
+ obj = self._get_object_parser(self.data)
+ if self.dtype_backend is not lib.no_default:
+ return obj.convert_dtypes(
+ infer_objects=False, dtype_backend=self.dtype_backend
+ )
+ else:
+ return obj
+
+ def _get_object_parser(self, json) -> DataFrame | Series:
+ """
+ Parses a json document into a pandas object.
+ """
+ typ = self.typ
+ dtype = self.dtype
+ kwargs = {
+ "orient": self.orient,
+ "dtype": self.dtype,
+ "convert_axes": self.convert_axes,
+ "convert_dates": self.convert_dates,
+ "keep_default_dates": self.keep_default_dates,
+ "precise_float": self.precise_float,
+ "date_unit": self.date_unit,
+ "dtype_backend": self.dtype_backend,
+ }
+ obj = None
+ if typ == "frame":
+ obj = FrameParser(json, **kwargs).parse()
+
+ if typ == "series" or obj is None:
+ if not isinstance(dtype, bool):
+ kwargs["dtype"] = dtype
+ obj = SeriesParser(json, **kwargs).parse()
+
+ return obj
+
+ def close(self) -> None:
+ """
+ If we opened a stream earlier, in _get_data_from_filepath, we should
+ close it.
+
+ If an open stream or file was passed, we leave it open.
+ """
+ if self.handles is not None:
+ self.handles.close()
+
+ def __iter__(self) -> Self:
+ return self
+
+ @overload
+ def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame:
+ ...
+
+ @overload
+ def __next__(self: JsonReader[Literal["series"]]) -> Series:
+ ...
+
+ @overload
+ def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
+ ...
+
+ def __next__(self) -> DataFrame | Series:
+ if self.nrows and self.nrows_seen >= self.nrows:
+ self.close()
+ raise StopIteration
+
+ lines = list(islice(self.data, self.chunksize))
+ if not lines:
+ self.close()
+ raise StopIteration
+
+ try:
+ lines_json = self._combine_lines(lines)
+ obj = self._get_object_parser(lines_json)
+
+ # Make sure that the returned objects have the right index.
+ obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
+ self.nrows_seen += len(obj)
+ except Exception as ex:
+ self.close()
+ raise ex
+
+ if self.dtype_backend is not lib.no_default:
+ return obj.convert_dtypes(
+ infer_objects=False, dtype_backend=self.dtype_backend
+ )
+ else:
+ return obj
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
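
A small sketch of chunked iteration with this class: with ``lines=True`` and a ``chunksize``, ``read_json`` returns the ``JsonReader`` itself, which yields DataFrames and renumbers their index as it goes (the data below is an arbitrary example).

from io import StringIO

import pandas as pd

lines = '{"a": 1}\n{"a": 2}\n{"a": 3}\n'
with pd.read_json(StringIO(lines), lines=True, chunksize=2) as reader:
    chunks = list(reader)          # DataFrames of 2 rows, then 1 row
df = pd.concat(chunks)             # index already runs 0, 1, 2 across chunks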
+
+
+class Parser:
+ _split_keys: tuple[str, ...]
+ _default_orient: str
+
+ _STAMP_UNITS = ("s", "ms", "us", "ns")
+ _MIN_STAMPS = {
+ "s": 31536000,
+ "ms": 31536000000,
+ "us": 31536000000000,
+ "ns": 31536000000000000,
+ }
+ json: str
+
+ def __init__(
+ self,
+ json: str,
+ orient,
+ dtype: DtypeArg | None = None,
+ convert_axes: bool = True,
+ convert_dates: bool | list[str] = True,
+ keep_default_dates: bool = False,
+ precise_float: bool = False,
+ date_unit=None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ ) -> None:
+ self.json = json
+
+ if orient is None:
+ orient = self._default_orient
+
+ self.orient = orient
+
+ self.dtype = dtype
+
+ if date_unit is not None:
+ date_unit = date_unit.lower()
+ if date_unit not in self._STAMP_UNITS:
+ raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}")
+ self.min_stamp = self._MIN_STAMPS[date_unit]
+ else:
+ self.min_stamp = self._MIN_STAMPS["s"]
+
+ self.precise_float = precise_float
+ self.convert_axes = convert_axes
+ self.convert_dates = convert_dates
+ self.date_unit = date_unit
+ self.keep_default_dates = keep_default_dates
+ self.obj: DataFrame | Series | None = None
+ self.dtype_backend = dtype_backend
+
+ @final
+ def check_keys_split(self, decoded: dict) -> None:
+ """
+ Checks that dict has only the appropriate keys for orient='split'.
+ """
+ bad_keys = set(decoded.keys()).difference(set(self._split_keys))
+ if bad_keys:
+ bad_keys_joined = ", ".join(bad_keys)
+ raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
+
+ @final
+ def parse(self):
+ self._parse()
+
+ if self.obj is None:
+ return None
+ if self.convert_axes:
+ self._convert_axes()
+ self._try_convert_types()
+ return self.obj
+
+ def _parse(self) -> None:
+ raise AbstractMethodError(self)
+
+ @final
+ def _convert_axes(self) -> None:
+ """
+ Try to convert axes.
+ """
+ obj = self.obj
+ assert obj is not None # for mypy
+ for axis_name in obj._AXIS_ORDERS:
+ ax = obj._get_axis(axis_name)
+ ser = Series(ax, dtype=ax.dtype, copy=False)
+ new_ser, result = self._try_convert_data(
+ name=axis_name,
+ data=ser,
+ use_dtypes=False,
+ convert_dates=True,
+ is_axis=True,
+ )
+ if result:
+ new_axis = Index(new_ser, dtype=new_ser.dtype, copy=False)
+ setattr(self.obj, axis_name, new_axis)
+
+ def _try_convert_types(self) -> None:
+ raise AbstractMethodError(self)
+
+ @final
+ def _try_convert_data(
+ self,
+ name: Hashable,
+ data: Series,
+ use_dtypes: bool = True,
+ convert_dates: bool | list[str] = True,
+ is_axis: bool = False,
+ ) -> tuple[Series, bool]:
+ """
+ Try to parse a Series into a column by inferring dtype.
+ """
+ # don't try to coerce, unless a force conversion
+ if use_dtypes:
+ if not self.dtype:
+ if all(notna(data)):
+ return data, False
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ "Downcasting object dtype arrays",
+ category=FutureWarning,
+ )
+ filled = data.fillna(np.nan)
+
+ return filled, True
+
+ elif self.dtype is True:
+ pass
+ else:
+ # dtype to force
+ dtype = (
+ self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype
+ )
+ if dtype is not None:
+ try:
+ return data.astype(dtype), True
+ except (TypeError, ValueError):
+ return data, False
+
+ if convert_dates:
+ new_data, result = self._try_convert_to_date(data)
+ if result:
+ return new_data, True
+
+ converted = False
+ if self.dtype_backend is not lib.no_default and not is_axis:
+ # Fall through for conversion later on
+ return data, True
+ elif is_string_dtype(data.dtype):
+ # try float
+ try:
+ data = data.astype("float64")
+ converted = True
+ except (TypeError, ValueError):
+ pass
+
+ if data.dtype.kind == "f" and data.dtype != "float64":
+ # coerce floats to 64
+ try:
+ data = data.astype("float64")
+ converted = True
+ except (TypeError, ValueError):
+ pass
+
+ # don't coerce 0-len data
+ if len(data) and data.dtype in ("float", "object"):
+ # coerce ints if we can
+ try:
+ new_data = data.astype("int64")
+ if (new_data == data).all():
+ data = new_data
+ converted = True
+ except (TypeError, ValueError, OverflowError):
+ pass
+
+ if data.dtype == "int" and data.dtype != "int64":
+ # coerce ints to 64
+ try:
+ data = data.astype("int64")
+ converted = True
+ except (TypeError, ValueError):
+ pass
+
+ # if we have an index, we want to preserve dtypes
+ if name == "index" and len(data):
+ if self.orient == "split":
+ return data, False
+
+ return data, converted
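
The dtype-forcing branch above is what a dict ``dtype`` argument to ``read_json`` exercises; a minimal sketch (the data is an arbitrary example):

from io import StringIO

import pandas as pd

data = '{"a":{"0":1,"1":2},"b":{"0":1.5,"1":2.5}}'
typed = pd.read_json(StringIO(data), dtype={"a": "float32"})
print(typed.dtypes)     # 'a' is forced to float32, 'b' keeps the inferred float64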
+
+ @final
+ def _try_convert_to_date(self, data: Series) -> tuple[Series, bool]:
+ """
+ Try to parse an ndarray-like into a date column.
+
+ Try to coerce object in epoch/iso formats and integer/float in epoch
+ formats. Return a boolean if parsing was successful.
+ """
+ # no conversion on empty
+ if not len(data):
+ return data, False
+
+ new_data = data
+
+ if new_data.dtype == "string":
+ new_data = new_data.astype(object)
+
+ if new_data.dtype == "object":
+ try:
+ new_data = data.astype("int64")
+ except OverflowError:
+ return data, False
+ except (TypeError, ValueError):
+ pass
+
+ # ignore numbers that are out of range
+ if issubclass(new_data.dtype.type, np.number):
+ in_range = (
+ isna(new_data._values)
+ | (new_data > self.min_stamp)
+ | (new_data._values == iNaT)
+ )
+ if not in_range.all():
+ return data, False
+
+ date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
+ for date_unit in date_units:
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time "
+ "zones will raise an error",
+ category=FutureWarning,
+ )
+ new_data = to_datetime(new_data, errors="raise", unit=date_unit)
+ except (ValueError, OverflowError, TypeError):
+ continue
+ return new_data, True
+ return data, False
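
A quick sketch of the default date handling implemented here: a column whose name ends in ``_at`` holds epoch values above the minimum stamp, so it is coerced to datetime with the unit auto-detected, while other integer columns are left alone (the data is an arbitrary example).

from io import StringIO

import pandas as pd

data = '{"created_at":{"0":1609459200000,"1":1609545600000},"value":{"0":1,"1":2}}'
df = pd.read_json(StringIO(data))
print(df["created_at"].dtype)    # datetime64[ns]; the epoch unit ('ms') is auto-detected
print(df["value"].dtype)         # int64; not a default datelike column name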
+
+
+class SeriesParser(Parser):
+ _default_orient = "index"
+ _split_keys = ("name", "index", "data")
+ obj: Series | None
+
+ def _parse(self) -> None:
+ data = ujson_loads(self.json, precise_float=self.precise_float)
+
+ if self.orient == "split":
+ decoded = {str(k): v for k, v in data.items()}
+ self.check_keys_split(decoded)
+ self.obj = Series(**decoded)
+ else:
+ self.obj = Series(data)
+
+ def _try_convert_types(self) -> None:
+ if self.obj is None:
+ return
+ obj, result = self._try_convert_data(
+ "data", self.obj, convert_dates=self.convert_dates
+ )
+ if result:
+ self.obj = obj
+
+
+class FrameParser(Parser):
+ _default_orient = "columns"
+ _split_keys = ("columns", "index", "data")
+ obj: DataFrame | None
+
+ def _parse(self) -> None:
+ json = self.json
+ orient = self.orient
+
+ if orient == "columns":
+ self.obj = DataFrame(
+ ujson_loads(json, precise_float=self.precise_float), dtype=None
+ )
+ elif orient == "split":
+ decoded = {
+ str(k): v
+ for k, v in ujson_loads(json, precise_float=self.precise_float).items()
+ }
+ self.check_keys_split(decoded)
+ orig_names = [
+ (tuple(col) if isinstance(col, list) else col)
+ for col in decoded["columns"]
+ ]
+ decoded["columns"] = dedup_names(
+ orig_names,
+ is_potential_multi_index(orig_names, None),
+ )
+ self.obj = DataFrame(dtype=None, **decoded)
+ elif orient == "index":
+ self.obj = DataFrame.from_dict(
+ ujson_loads(json, precise_float=self.precise_float),
+ dtype=None,
+ orient="index",
+ )
+ elif orient == "table":
+ self.obj = parse_table_schema(json, precise_float=self.precise_float)
+ else:
+ self.obj = DataFrame(
+ ujson_loads(json, precise_float=self.precise_float), dtype=None
+ )
+
+ def _process_converter(
+ self,
+ f: Callable[[Hashable, Series], tuple[Series, bool]],
+ filt: Callable[[Hashable], bool] | None = None,
+ ) -> None:
+ """
+ Take a conversion function and possibly recreate the frame.
+ """
+ if filt is None:
+ filt = lambda col: True
+
+ obj = self.obj
+ assert obj is not None # for mypy
+
+ needs_new_obj = False
+ new_obj = {}
+ for i, (col, c) in enumerate(obj.items()):
+ if filt(col):
+ new_data, result = f(col, c)
+ if result:
+ c = new_data
+ needs_new_obj = True
+ new_obj[i] = c
+
+ if needs_new_obj:
+ # possibly handle dup columns
+ new_frame = DataFrame(new_obj, index=obj.index)
+ new_frame.columns = obj.columns
+ self.obj = new_frame
+
+ def _try_convert_types(self) -> None:
+ if self.obj is None:
+ return
+ if self.convert_dates:
+ self._try_convert_dates()
+
+ self._process_converter(
+ lambda col, c: self._try_convert_data(col, c, convert_dates=False)
+ )
+
+ def _try_convert_dates(self) -> None:
+ if self.obj is None:
+ return
+
+ # our columns to parse
+ convert_dates_list_bool = self.convert_dates
+ if isinstance(convert_dates_list_bool, bool):
+ convert_dates_list_bool = []
+ convert_dates = set(convert_dates_list_bool)
+
+ def is_ok(col) -> bool:
+ """
+ Return if this col is ok to try for a date parse.
+ """
+ if col in convert_dates:
+ return True
+ if not self.keep_default_dates:
+ return False
+ if not isinstance(col, str):
+ return False
+
+ col_lower = col.lower()
+ if (
+ col_lower.endswith(("_at", "_time"))
+ or col_lower == "modified"
+ or col_lower == "date"
+ or col_lower == "datetime"
+ or col_lower.startswith("timestamp")
+ ):
+ return True
+ return False
+
+ self._process_converter(lambda col, c: self._try_convert_to_date(c), filt=is_ok)
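
To round off the parser: a sketch of how ``convert_dates`` interacts with the column-name heuristic in ``is_ok`` above (the data is an arbitrary example):

from io import StringIO

import pandas as pd

data = '{"modified":{"0":1609459200000},"custom":{"0":1609459200000}}'

pd.read_json(StringIO(data)).dtypes                            # only 'modified' parsed as datetime
pd.read_json(StringIO(data), convert_dates=["custom"]).dtypes  # both columns parsed as datetime
pd.read_json(StringIO(data), convert_dates=False).dtypes       # neither column parsed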
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_normalize.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_normalize.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1e2210f9d8940a0931b07e1631350089140ff95
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_normalize.py
@@ -0,0 +1,544 @@
+# ---------------------------------------------------------------------
+# JSON normalization routines
+from __future__ import annotations
+
+from collections import (
+ abc,
+ defaultdict,
+)
+import copy
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ DefaultDict,
+)
+
+import numpy as np
+
+from pandas._libs.writers import convert_json_to_lines
+
+import pandas as pd
+from pandas import DataFrame
+
+if TYPE_CHECKING:
+ from collections.abc import Iterable
+
+ from pandas._typing import (
+ IgnoreRaise,
+ Scalar,
+ )
+
+
+def convert_to_line_delimits(s: str) -> str:
+ """
+ Helper function that converts JSON lists to line delimited JSON.
+ """
+ # Determine whether we have a JSON list to turn into lines; otherwise just
+ # return the JSON object as-is, since only lists can be line-delimited.
+ if not (s[0] == "[" and s[-1] == "]"):
+ return s
+ s = s[1:-1]
+
+ return convert_json_to_lines(s)
+
+
+def nested_to_record(
+ ds,
+ prefix: str = "",
+ sep: str = ".",
+ level: int = 0,
+ max_level: int | None = None,
+):
+ """
+ A simplified json_normalize
+
+ Converts a nested dict into a flat dict ("record"); unlike json_normalize,
+ it does not attempt to extract a subset of the data.
+
+ Parameters
+ ----------
+ ds : dict or list of dicts
+ prefix : str, optional, default ""
+ The prefix prepended to nested keys.
+ sep : str, default '.'
+ Nested records will generate names separated by sep,
+ e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
+ level : int, optional, default 0
+ The current level of nesting, used during recursion.
+
+ max_level : int, optional, default None
+ The max depth to normalize.
+
+ Returns
+ -------
+ d - dict or list of dicts, matching `ds`
+
+ Examples
+ --------
+ >>> nested_to_record(
+ ... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
+ ... )
+ {\
+'flat1': 1, \
+'dict1.c': 1, \
+'dict1.d': 2, \
+'nested.e.c': 1, \
+'nested.e.d': 2, \
+'nested.d': 2\
+}
+ """
+ singleton = False
+ if isinstance(ds, dict):
+ ds = [ds]
+ singleton = True
+ new_ds = []
+ for d in ds:
+ new_d = copy.deepcopy(d)
+ for k, v in d.items():
+ # each key gets renamed with prefix
+ if not isinstance(k, str):
+ k = str(k)
+ if level == 0:
+ newkey = k
+ else:
+ newkey = prefix + sep + k
+
+ # flatten if type is dict and
+ # current dict level < maximum level provided and
+ # only dicts gets recurse-flattened
+ # only at level>1 do we rename the rest of the keys
+ if not isinstance(v, dict) or (
+ max_level is not None and level >= max_level
+ ):
+ if level != 0: # so we skip copying for top level, common case
+ v = new_d.pop(k)
+ new_d[newkey] = v
+ continue
+
+ v = new_d.pop(k)
+ new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))
+ new_ds.append(new_d)
+
+ if singleton:
+ return new_ds[0]
+ return new_ds
+
+
+def _normalise_json(
+ data: Any,
+ key_string: str,
+ normalized_dict: dict[str, Any],
+ separator: str,
+) -> dict[str, Any]:
+ """
+ Main recursive function.
+
+ Designed for the most basic use case of pd.json_normalize(data),
+ intended as a performance improvement; see #15621.
+
+ Parameters
+ ----------
+ data : Any
+ Type dependent on types contained within nested Json
+ key_string : str
+ New key (with separator(s) in) for data
+ normalized_dict : dict
+ The new normalized/flattened Json dict
+ separator : str, default '.'
+ Nested records will generate names separated by sep,
+ e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
+ """
+ if isinstance(data, dict):
+ for key, value in data.items():
+ new_key = f"{key_string}{separator}{key}"
+
+ if not key_string:
+ new_key = new_key.removeprefix(separator)
+
+ _normalise_json(
+ data=value,
+ key_string=new_key,
+ normalized_dict=normalized_dict,
+ separator=separator,
+ )
+ else:
+ normalized_dict[key_string] = data
+ return normalized_dict
+
+
+def _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]:
+ """
+ Order the top level keys and then recursively go to depth
+
+ Parameters
+ ----------
+ data : dict or list of dicts
+ separator : str, default '.'
+ Nested records will generate names separated by sep,
+ e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
+
+ Returns
+ -------
+ dict or list of dicts, matching `normalised_json_object`
+ """
+ top_dict_ = {k: v for k, v in data.items() if not isinstance(v, dict)}
+ nested_dict_ = _normalise_json(
+ data={k: v for k, v in data.items() if isinstance(v, dict)},
+ key_string="",
+ normalized_dict={},
+ separator=separator,
+ )
+ return {**top_dict_, **nested_dict_}
+
+
+def _simple_json_normalize(
+ ds: dict | list[dict],
+ sep: str = ".",
+) -> dict | list[dict] | Any:
+ """
+ An optimized, basic json_normalize.
+
+ Converts a nested dict into a flat dict ("record"); unlike
+ json_normalize and nested_to_record it doesn't do anything clever,
+ but for the most basic use cases it enhances performance,
+ e.g. pd.json_normalize(data).
+
+ Parameters
+ ----------
+ ds : dict or list of dicts
+ sep : str, default '.'
+ Nested records will generate names separated by sep,
+ e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
+
+ Returns
+ -------
+ dict or list of dicts, matching `ds`
+
+ Examples
+ --------
+ >>> _simple_json_normalize(
+ ... {
+ ... "flat1": 1,
+ ... "dict1": {"c": 1, "d": 2},
+ ... "nested": {"e": {"c": 1, "d": 2}, "d": 2},
+ ... }
+ ... )
+ {\
+'flat1': 1, \
+'dict1.c': 1, \
+'dict1.d': 2, \
+'nested.e.c': 1, \
+'nested.e.d': 2, \
+'nested.d': 2\
+}
+
+ """
+ normalised_json_object = {}
+ # expect a dictionary, as most jsons are. However, lists are perfectly valid
+ if isinstance(ds, dict):
+ normalised_json_object = _normalise_json_ordered(data=ds, separator=sep)
+ elif isinstance(ds, list):
+ normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds]
+ return normalised_json_list
+ return normalised_json_object
+
+
+def json_normalize(
+ data: dict | list[dict],
+ record_path: str | list | None = None,
+ meta: str | list[str | list[str]] | None = None,
+ meta_prefix: str | None = None,
+ record_prefix: str | None = None,
+ errors: IgnoreRaise = "raise",
+ sep: str = ".",
+ max_level: int | None = None,
+) -> DataFrame:
+ """
+ Normalize semi-structured JSON data into a flat table.
+
+ Parameters
+ ----------
+ data : dict or list of dicts
+ Unserialized JSON objects.
+ record_path : str or list of str, default None
+ Path in each object to list of records. If not passed, data will be
+ assumed to be an array of records.
+ meta : list of paths (str or list of str), default None
+ Fields to use as metadata for each record in resulting table.
+ meta_prefix : str, default None
+ If not None, prefix to add to the metadata column names, which are
+ built from the dotted meta path (e.g. ``info.governor``).
+ record_prefix : str, default None
+ If not None, prefix to add to the record column names, e.g. passing
+ ``'Prefix.'`` turns column ``0`` into ``Prefix.0``.
+ errors : {'raise', 'ignore'}, default 'raise'
+ Configures error handling.
+
+ * 'ignore' : will ignore KeyError if keys listed in meta are not
+ always present.
+ * 'raise' : will raise KeyError if keys listed in meta are not
+ always present.
+ sep : str, default '.'
+ Nested records will generate names separated by sep.
+ e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar.
+ max_level : int, default None
+ Max number of levels (depth of dict) to normalize.
+ If None, normalizes all levels.
+
+ Returns
+ -------
+ frame : DataFrame
+ Normalize semi-structured JSON data into a flat table.
+
+ Examples
+ --------
+ >>> data = [
+ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
+ ... {"name": {"given": "Mark", "family": "Regner"}},
+ ... {"id": 2, "name": "Faye Raker"},
+ ... ]
+ >>> pd.json_normalize(data)
+ id name.first name.last name.given name.family name
+ 0 1.0 Coleen Volk NaN NaN NaN
+ 1 NaN NaN NaN Mark Regner NaN
+ 2 2.0 NaN NaN NaN NaN Faye Raker
+
+ >>> data = [
+ ... {
+ ... "id": 1,
+ ... "name": "Cole Volk",
+ ... "fitness": {"height": 130, "weight": 60},
+ ... },
+ ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},
+ ... {
+ ... "id": 2,
+ ... "name": "Faye Raker",
+ ... "fitness": {"height": 130, "weight": 60},
+ ... },
+ ... ]
+ >>> pd.json_normalize(data, max_level=0)
+ id name fitness
+ 0 1.0 Cole Volk {'height': 130, 'weight': 60}
+ 1 NaN Mark Reg {'height': 130, 'weight': 60}
+ 2 2.0 Faye Raker {'height': 130, 'weight': 60}
+
+ Normalizes nested data up to level 1.
+
+ >>> data = [
+ ... {
+ ... "id": 1,
+ ... "name": "Cole Volk",
+ ... "fitness": {"height": 130, "weight": 60},
+ ... },
+ ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},
+ ... {
+ ... "id": 2,
+ ... "name": "Faye Raker",
+ ... "fitness": {"height": 130, "weight": 60},
+ ... },
+ ... ]
+ >>> pd.json_normalize(data, max_level=1)
+ id name fitness.height fitness.weight
+ 0 1.0 Cole Volk 130 60
+ 1 NaN Mark Reg 130 60
+ 2 2.0 Faye Raker 130 60
+
+ >>> data = [
+ ... {
+ ... "state": "Florida",
+ ... "shortname": "FL",
+ ... "info": {"governor": "Rick Scott"},
+ ... "counties": [
+ ... {"name": "Dade", "population": 12345},
+ ... {"name": "Broward", "population": 40000},
+ ... {"name": "Palm Beach", "population": 60000},
+ ... ],
+ ... },
+ ... {
+ ... "state": "Ohio",
+ ... "shortname": "OH",
+ ... "info": {"governor": "John Kasich"},
+ ... "counties": [
+ ... {"name": "Summit", "population": 1234},
+ ... {"name": "Cuyahoga", "population": 1337},
+ ... ],
+ ... },
+ ... ]
+ >>> result = pd.json_normalize(
+ ... data, "counties", ["state", "shortname", ["info", "governor"]]
+ ... )
+ >>> result
+ name population state shortname info.governor
+ 0 Dade 12345 Florida FL Rick Scott
+ 1 Broward 40000 Florida FL Rick Scott
+ 2 Palm Beach 60000 Florida FL Rick Scott
+ 3 Summit 1234 Ohio OH John Kasich
+ 4 Cuyahoga 1337 Ohio OH John Kasich
+
+ >>> data = {"A": [1, 2]}
+ >>> pd.json_normalize(data, "A", record_prefix="Prefix.")
+ Prefix.0
+ 0 1
+ 1 2
+
+ Returns normalized data with columns prefixed with the given string.
+ """
+
+ def _pull_field(
+ js: dict[str, Any], spec: list | str, extract_record: bool = False
+ ) -> Scalar | Iterable:
+ """Internal function to pull field"""
+ result = js
+ try:
+ if isinstance(spec, list):
+ for field in spec:
+ if result is None:
+ raise KeyError(field)
+ result = result[field]
+ else:
+ result = result[spec]
+ except KeyError as e:
+ if extract_record:
+ raise KeyError(
+ f"Key {e} not found. If specifying a record_path, all elements of "
+ f"data should have the path."
+ ) from e
+ if errors == "ignore":
+ return np.nan
+ else:
+ raise KeyError(
+ f"Key {e} not found. To replace missing values of {e} with "
+ f"np.nan, pass in errors='ignore'"
+ ) from e
+
+ return result
+
+ def _pull_records(js: dict[str, Any], spec: list | str) -> list:
+ """
+ Internal function to pull the field for records. Similar to
+ _pull_field, but the result is required to be a list; raises an
+ error if the value is neither a list nor null.
+ """
+ result = _pull_field(js, spec, extract_record=True)
+
+ # GH 31507, GH 30145, GH 26284: if result is not a list, raise TypeError
+ # unless it is null, in which case return an empty list
+ if not isinstance(result, list):
+ if pd.isnull(result):
+ result = []
+ else:
+ raise TypeError(
+ f"{js} has non list value {result} for path {spec}. "
+ "Must be list or null."
+ )
+ return result
+
+ if isinstance(data, list) and not data:
+ return DataFrame()
+ elif isinstance(data, dict):
+ # A bit of a hackjob
+ data = [data]
+ elif isinstance(data, abc.Iterable) and not isinstance(data, str):
+ # GH35923 Fix pd.json_normalize to not skip the first element of a
+ # generator input
+ data = list(data)
+ else:
+ raise NotImplementedError
+
+ # check to see if a simple recursive function is possible to
+ # improve performance (see #15621), but only for cases such
+ # as pd.json_normalize(data) or pd.json_normalize(data, sep=sep)
+ if (
+ record_path is None
+ and meta is None
+ and meta_prefix is None
+ and record_prefix is None
+ and max_level is None
+ ):
+ return DataFrame(_simple_json_normalize(data, sep=sep))
+
+ if record_path is None:
+ if any([isinstance(x, dict) for x in y.values()] for y in data):
+ # naive normalization, this is idempotent for flat records
+ # and potentially will inflate the data considerably for
+ # deeply nested structures:
+ # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
+ #
+ # TODO: handle record values which are lists, at least error
+ # reasonably
+ data = nested_to_record(data, sep=sep, max_level=max_level)
+ return DataFrame(data)
+ elif not isinstance(record_path, list):
+ record_path = [record_path]
+
+ if meta is None:
+ meta = []
+ elif not isinstance(meta, list):
+ meta = [meta]
+
+ _meta = [m if isinstance(m, list) else [m] for m in meta]
+
+ # Disastrously inefficient for now
+ records: list = []
+ lengths = []
+
+ meta_vals: DefaultDict = defaultdict(list)
+ meta_keys = [sep.join(val) for val in _meta]
+
+ def _recursive_extract(data, path, seen_meta, level: int = 0) -> None:
+ if isinstance(data, dict):
+ data = [data]
+ if len(path) > 1:
+ for obj in data:
+ for val, key in zip(_meta, meta_keys):
+ if level + 1 == len(val):
+ seen_meta[key] = _pull_field(obj, val[-1])
+
+ _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
+ else:
+ for obj in data:
+ recs = _pull_records(obj, path[0])
+ recs = [
+ nested_to_record(r, sep=sep, max_level=max_level)
+ if isinstance(r, dict)
+ else r
+ for r in recs
+ ]
+
+ # For repeating the metadata later
+ lengths.append(len(recs))
+ for val, key in zip(_meta, meta_keys):
+ if level + 1 > len(val):
+ meta_val = seen_meta[key]
+ else:
+ meta_val = _pull_field(obj, val[level:])
+ meta_vals[key].append(meta_val)
+ records.extend(recs)
+
+ _recursive_extract(data, record_path, {}, level=0)
+
+ result = DataFrame(records)
+
+ if record_prefix is not None:
+ result = result.rename(columns=lambda x: f"{record_prefix}{x}")
+
+ # Data types, a problem
+ for k, v in meta_vals.items():
+ if meta_prefix is not None:
+ k = meta_prefix + k
+
+ if k in result:
+ raise ValueError(
+ f"Conflicting metadata name {k}, need distinguishing prefix "
+ )
+ # GH 37782
+
+ values = np.array(v, dtype=object)
+
+ if values.ndim > 1:
+ # GH 37782
+ values = np.empty((len(v),), dtype=object)
+ for i, v in enumerate(v):
+ values[i] = v
+
+ result[k] = values.repeat(lengths)
+ return result
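
One case the docstring above does not show is ``errors='ignore'`` together with meta keys that are missing from some records; a minimal sketch (the data is an arbitrary example):

import pandas as pd

data = [
    {"team": "A", "info": {"coach": "Pat"},
     "players": [{"name": "Ann"}, {"name": "Bo"}]},
    {"team": "B", "players": [{"name": "Cy"}]},   # no "info" key
]

pd.json_normalize(
    data, record_path="players", meta=["team", ["info", "coach"]], errors="ignore"
)
#   name team info.coach
# 0  Ann    A        Pat
# 1   Bo    A        Pat
# 2   Cy    B        NaN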
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_table_schema.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_table_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d9fba72cf1733e8893e194c29d5a00bc3ad1d5c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/json/_table_schema.py
@@ -0,0 +1,389 @@
+"""
+Table Schema builders
+
+https://specs.frictionlessdata.io/table-schema/
+"""
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ cast,
+)
+import warnings
+
+from pandas._libs import lib
+from pandas._libs.json import ujson_loads
+from pandas._libs.tslibs import timezones
+from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.base import _registry as registry
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_integer_dtype,
+ is_numeric_dtype,
+ is_string_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ DatetimeTZDtype,
+ ExtensionDtype,
+ PeriodDtype,
+)
+
+from pandas import DataFrame
+import pandas.core.common as com
+
+from pandas.tseries.frequencies import to_offset
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ DtypeObj,
+ JSONSerializable,
+ )
+
+ from pandas import Series
+ from pandas.core.indexes.multi import MultiIndex
+
+
+TABLE_SCHEMA_VERSION = "1.4.0"
+
+
+def as_json_table_type(x: DtypeObj) -> str:
+ """
+ Convert a NumPy / pandas type to its corresponding json_table.
+
+ Parameters
+ ----------
+ x : np.dtype or ExtensionDtype
+
+ Returns
+ -------
+ str
+ the Table Schema data types
+
+ Notes
+ -----
+ This table shows the relationship between NumPy / pandas dtypes,
+ and Table Schema dtypes.
+
+ =============== =================
+ Pandas type     Table Schema type
+ =============== =================
+ int64           integer
+ float64         number
+ bool            boolean
+ datetime64[ns]  datetime
+ timedelta64[ns] duration
+ object          string
+ categorical     any
+ =============== =================
+ """
+ if is_integer_dtype(x):
+ return "integer"
+ elif is_bool_dtype(x):
+ return "boolean"
+ elif is_numeric_dtype(x):
+ return "number"
+ elif lib.is_np_dtype(x, "M") or isinstance(x, (DatetimeTZDtype, PeriodDtype)):
+ return "datetime"
+ elif lib.is_np_dtype(x, "m"):
+ return "duration"
+ elif isinstance(x, ExtensionDtype):
+ return "any"
+ elif is_string_dtype(x):
+ return "string"
+ else:
+ return "any"
+
+
+def set_default_names(data):
+ """Sets index names to 'index' for regular, or 'level_x' for Multi"""
+ if com.all_not_none(*data.index.names):
+ nms = data.index.names
+ if len(nms) == 1 and data.index.name == "index":
+ warnings.warn(
+ "Index name of 'index' is not round-trippable.",
+ stacklevel=find_stack_level(),
+ )
+ elif len(nms) > 1 and any(x.startswith("level_") for x in nms):
+ warnings.warn(
+ "Index names beginning with 'level_' are not round-trippable.",
+ stacklevel=find_stack_level(),
+ )
+ return data
+
+ data = data.copy()
+ if data.index.nlevels > 1:
+ data.index.names = com.fill_missing_names(data.index.names)
+ else:
+ data.index.name = data.index.name or "index"
+ return data
+
+
+def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]:
+ dtype = arr.dtype
+ name: JSONSerializable
+ if arr.name is None:
+ name = "values"
+ else:
+ name = arr.name
+ field: dict[str, JSONSerializable] = {
+ "name": name,
+ "type": as_json_table_type(dtype),
+ }
+
+ if isinstance(dtype, CategoricalDtype):
+ cats = dtype.categories
+ ordered = dtype.ordered
+
+ field["constraints"] = {"enum": list(cats)}
+ field["ordered"] = ordered
+ elif isinstance(dtype, PeriodDtype):
+ field["freq"] = dtype.freq.freqstr
+ elif isinstance(dtype, DatetimeTZDtype):
+ if timezones.is_utc(dtype.tz):
+ # timezone.utc has no "zone" attr
+ field["tz"] = "UTC"
+ else:
+ # error: "tzinfo" has no attribute "zone"
+ field["tz"] = dtype.tz.zone # type: ignore[attr-defined]
+ elif isinstance(dtype, ExtensionDtype):
+ field["extDtype"] = dtype.name
+ return field
+
+
+def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype:
+ """
+ Converts a JSON field descriptor into its corresponding NumPy / pandas type
+
+ Parameters
+ ----------
+ field
+ A JSON field descriptor
+
+ Returns
+ -------
+ dtype
+
+ Raises
+ ------
+ ValueError
+ If the type of the provided field is unknown or currently unsupported
+
+ Examples
+ --------
+ >>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"})
+ 'int64'
+
+ >>> convert_json_field_to_pandas_type(
+ ... {
+ ... "name": "a_categorical",
+ ... "type": "any",
+ ... "constraints": {"enum": ["a", "b", "c"]},
+ ... "ordered": True,
+ ... }
+ ... )
+ CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object)
+
+ >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"})
+ 'datetime64[ns]'
+
+ >>> convert_json_field_to_pandas_type(
+ ... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"}
+ ... )
+ 'datetime64[ns, US/Central]'
+ """
+ typ = field["type"]
+ if typ == "string":
+ return "object"
+ elif typ == "integer":
+ return field.get("extDtype", "int64")
+ elif typ == "number":
+ return field.get("extDtype", "float64")
+ elif typ == "boolean":
+ return field.get("extDtype", "bool")
+ elif typ == "duration":
+ return "timedelta64"
+ elif typ == "datetime":
+ if field.get("tz"):
+ return f"datetime64[ns, {field['tz']}]"
+ elif field.get("freq"):
+ # GH#9586 rename frequency M to ME for offsets
+ offset = to_offset(field["freq"])
+ freq_n, freq_name = offset.n, offset.name
+ freq = freq_to_period_freqstr(freq_n, freq_name)
+ # GH#47747 using datetime over period to minimize the change surface
+ return f"period[{freq}]"
+ else:
+ return "datetime64[ns]"
+ elif typ == "any":
+ if "constraints" in field and "ordered" in field:
+ return CategoricalDtype(
+ categories=field["constraints"]["enum"], ordered=field["ordered"]
+ )
+ elif "extDtype" in field:
+ return registry.find(field["extDtype"])
+ else:
+ return "object"
+
+ raise ValueError(f"Unsupported or invalid field type: {typ}")
+
+
+def build_table_schema(
+ data: DataFrame | Series,
+ index: bool = True,
+ primary_key: bool | None = None,
+ version: bool = True,
+) -> dict[str, JSONSerializable]:
+ """
+ Create a Table schema from ``data``.
+
+ Parameters
+ ----------
+ data : Series, DataFrame
+ index : bool, default True
+ Whether to include ``data.index`` in the schema.
+ primary_key : bool or None, default None
+ Column names to designate as the primary key.
+ The default `None` will set `'primaryKey'` to the index
+ level or levels if the index is unique.
+ version : bool, default True
+ Whether to include a field `pandas_version` with the version
+ of pandas that last revised the table schema. This version
+ can be different from the installed pandas version.
+
+ Returns
+ -------
+ dict
+
+ Notes
+ -----
+ See `Table Schema
+ <https://specs.frictionlessdata.io/table-schema/>`__ for
+ conversion types.
+ Timedeltas are converted to ISO 8601 duration format with
+ 9 decimal places after the seconds field for nanosecond precision.
+
+ Categoricals are converted to the `any` dtype, and use the `enum` field
+ constraint to list the allowed values. The `ordered` attribute is included
+ in an `ordered` field.
+
+ Examples
+ --------
+ >>> from pandas.io.json._table_schema import build_table_schema
+ >>> df = pd.DataFrame(
+ ... {'A': [1, 2, 3],
+ ... 'B': ['a', 'b', 'c'],
+ ... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
+ ... }, index=pd.Index(range(3), name='idx'))
+ >>> build_table_schema(df)
+ {'fields': \
+[{'name': 'idx', 'type': 'integer'}, \
+{'name': 'A', 'type': 'integer'}, \
+{'name': 'B', 'type': 'string'}, \
+{'name': 'C', 'type': 'datetime'}], \
+'primaryKey': ['idx'], \
+'pandas_version': '1.4.0'}
+ """
+ if index is True:
+ data = set_default_names(data)
+
+ schema: dict[str, Any] = {}
+ fields = []
+
+ if index:
+ if data.index.nlevels > 1:
+ data.index = cast("MultiIndex", data.index)
+ for level, name in zip(data.index.levels, data.index.names):
+ new_field = convert_pandas_type_to_json_field(level)
+ new_field["name"] = name
+ fields.append(new_field)
+ else:
+ fields.append(convert_pandas_type_to_json_field(data.index))
+
+ if data.ndim > 1:
+ for column, s in data.items():
+ fields.append(convert_pandas_type_to_json_field(s))
+ else:
+ fields.append(convert_pandas_type_to_json_field(data))
+
+ schema["fields"] = fields
+ if index and data.index.is_unique and primary_key is None:
+ if data.index.nlevels == 1:
+ schema["primaryKey"] = [data.index.name]
+ else:
+ schema["primaryKey"] = data.index.names
+ elif primary_key is not None:
+ schema["primaryKey"] = primary_key
+
+ if version:
+ schema["pandas_version"] = TABLE_SCHEMA_VERSION
+ return schema
+
+
+def parse_table_schema(json, precise_float: bool) -> DataFrame:
+ """
+ Builds a DataFrame from a given schema
+
+ Parameters
+ ----------
+ json :
+ A JSON table schema
+ precise_float : bool
+ Flag controlling precision when decoding string to double values, as
+ dictated by ``read_json``
+
+ Returns
+ -------
+ df : DataFrame
+
+ Raises
+ ------
+ NotImplementedError
+ If the JSON table schema contains either timezone or timedelta data
+
+ Notes
+ -----
+ Because :func:`DataFrame.to_json` uses the string 'index' to denote a
+ name-less :class:`Index`, this function sets the name of the returned
+ :class:`DataFrame` to ``None`` when said string is encountered with a
+ normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
+ applies to any strings beginning with 'level_'. Therefore, an
+ :class:`Index` name of 'index' and :class:`MultiIndex` names starting
+ with 'level_' are not supported.
+
+ See Also
+ --------
+ build_table_schema : Inverse function.
+ pandas.read_json
+ """
+ table = ujson_loads(json, precise_float=precise_float)
+ col_order = [field["name"] for field in table["schema"]["fields"]]
+ df = DataFrame(table["data"], columns=col_order)[col_order]
+
+ dtypes = {
+ field["name"]: convert_json_field_to_pandas_type(field)
+ for field in table["schema"]["fields"]
+ }
+
+ # No ISO constructor for Timedelta as of yet, so need to raise
+ if "timedelta64" in dtypes.values():
+ raise NotImplementedError(
+ 'table="orient" can not yet read ISO-formatted Timedelta data'
+ )
+
+ df = df.astype(dtypes)
+
+ if "primaryKey" in table["schema"]:
+ df = df.set_index(table["schema"]["primaryKey"])
+ if len(df.index.names) == 1:
+ if df.index.name == "index":
+ df.index.name = None
+ else:
+ df.index.names = [
+ None if x.startswith("level_") else x for x in df.index.names
+ ]
+
+ return df
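A hedged end-to-end sketch of how these helpers back ``orient="table"`` JSON. Calling the private functions directly is for illustration only; real code goes through ``DataFrame.to_json`` and ``read_json``.

```python
import pandas as pd
from pandas.io.json._table_schema import build_table_schema, parse_table_schema

df = pd.DataFrame(
    {"A": [1, 2, 3], "B": pd.date_range("2016-01-01", periods=3, freq="D")},
    index=pd.Index(range(3), name="idx"),
)

schema = build_table_schema(df)
print(schema["primaryKey"])  # ['idx']

# DataFrame.to_json(orient="table") embeds this schema next to the data,
# and parse_table_schema() is what read_json(orient="table") uses to
# rebuild the frame, restoring dtypes and the primary-key index.
payload = df.to_json(orient="table")
roundtripped = parse_table_schema(payload, precise_float=False)
print(roundtripped.index.name)       # idx
print(roundtripped.dtypes.to_dict()) # A back as int64, B as datetime64[ns]
```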
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/orc.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/orc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed9463c38d5deb907cb7df0adee03152380d7a0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/orc.py
@@ -0,0 +1,245 @@
+""" orc compat """
+from __future__ import annotations
+
+import io
+from types import ModuleType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Literal,
+)
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._validators import check_dtype_backend
+
+import pandas as pd
+from pandas.core.indexes.api import default_index
+
+from pandas.io._util import arrow_string_types_mapper
+from pandas.io.common import (
+ get_handle,
+ is_fsspec_url,
+)
+
+if TYPE_CHECKING:
+ import fsspec
+ import pyarrow.fs
+
+ from pandas._typing import (
+ DtypeBackend,
+ FilePath,
+ ReadBuffer,
+ WriteBuffer,
+ )
+
+ from pandas.core.frame import DataFrame
+
+
+def read_orc(
+ path: FilePath | ReadBuffer[bytes],
+ columns: list[str] | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None = None,
+ **kwargs: Any,
+) -> DataFrame:
+ """
+ Load an ORC object from the file path, returning a DataFrame.
+
+ Parameters
+ ----------
+ path : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.orc``.
+ columns : list, default None
+ If not None, only these columns will be read from the file.
+ Output always follows the ordering of the file and not the columns list.
+ This mirrors the original behaviour of
+ :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ filesystem : fsspec or pyarrow filesystem, default None
+ Filesystem object to use when reading the parquet file.
+
+ .. versionadded:: 2.1.0
+
+ **kwargs
+ Any additional kwargs are passed to pyarrow.
+
+ Returns
+ -------
+ DataFrame
+
+ Notes
+ -----
+ Before using this function you should read the :ref:`user guide about ORC `
+ and :ref:`install optional dependencies `.
+
+ If ``path`` is a URI scheme pointing to a local or remote file (e.g. "s3://"),
+ a ``pyarrow.fs`` filesystem will be attempted to read the file. You can also pass a
+ pyarrow or fsspec filesystem object into the filesystem keyword to override this
+ behavior.
+
+ Examples
+ --------
+ >>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP
+ """
+ # we require a newer version of pyarrow than we support for parquet
+
+ orc = import_optional_dependency("pyarrow.orc")
+
+ check_dtype_backend(dtype_backend)
+
+ with get_handle(path, "rb", is_text=False) as handles:
+ source = handles.handle
+ if is_fsspec_url(path) and filesystem is None:
+ pa = import_optional_dependency("pyarrow")
+ pa_fs = import_optional_dependency("pyarrow.fs")
+ try:
+ filesystem, source = pa_fs.FileSystem.from_uri(path)
+ except (TypeError, pa.ArrowInvalid):
+ pass
+
+ pa_table = orc.read_table(
+ source=source, columns=columns, filesystem=filesystem, **kwargs
+ )
+ if dtype_backend is not lib.no_default:
+ if dtype_backend == "pyarrow":
+ df = pa_table.to_pandas(types_mapper=pd.ArrowDtype)
+ else:
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping()
+ df = pa_table.to_pandas(types_mapper=mapping.get)
+ return df
+ else:
+ if using_pyarrow_string_dtype():
+ types_mapper = arrow_string_types_mapper()
+ else:
+ types_mapper = None
+ return pa_table.to_pandas(types_mapper=types_mapper)
+
+
+def to_orc(
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes] | None = None,
+ *,
+ engine: Literal["pyarrow"] = "pyarrow",
+ index: bool | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+) -> bytes | None:
+ """
+ Write a DataFrame to the ORC format.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ df : DataFrame
+ The dataframe to be written to ORC. Raises NotImplementedError
+ if dtype of one or more columns is category, unsigned integers,
+ intervals, periods or sparse.
+ path : str, file-like object or None, default None
+ If a string, it will be used as Root Directory path
+ when writing a partitioned dataset. By file-like object,
+ we refer to objects with a write() method, such as a file handle
+ (e.g. via builtin open function). If path is None,
+ a bytes object is returned.
+ engine : str, default 'pyarrow'
+ ORC library to use.
+ index : bool, optional
+ If ``True``, include the dataframe's index(es) in the file output. If
+ ``False``, they will not be written to the file.
+ If ``None``, similar to ``infer`` the dataframe's index(es)
+ will be saved. However, instead of being saved as values,
+ the RangeIndex will be stored as a range in the metadata so it
+ doesn't require much space and is faster. Other indexes will
+ be included as columns in the file output.
+ engine_kwargs : dict[str, Any] or None, default None
+ Additional keyword arguments passed to :func:`pyarrow.orc.write_table`.
+
+ Returns
+ -------
+ bytes if no path argument is provided else None
+
+ Raises
+ ------
+ NotImplementedError
+ Dtype of one or more columns is category, unsigned integers, interval,
+ period or sparse.
+ ValueError
+ engine is not pyarrow.
+
+ Notes
+ -----
+ * Before using this function you should read the
+ :ref:`user guide about ORC ` and
+ :ref:`install optional dependencies `.
+ * This function requires `pyarrow `_
+ library.
+ * For supported dtypes please refer to `supported ORC features in Arrow
+ `__.
+ * Currently timezones in datetime columns are not preserved when a
+ dataframe is converted into ORC files.
+ """
+ if index is None:
+ index = df.index.names[0] is not None
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ # validate index
+ # --------------
+
+ # validate that we have only a default index
+ # raise on anything else as we don't serialize the index
+
+ if not df.index.equals(default_index(len(df))):
+ raise ValueError(
+ "orc does not support serializing a non-default index for the index; "
+ "you can .reset_index() to make the index into column(s)"
+ )
+
+ if df.index.name is not None:
+ raise ValueError("orc does not serialize index meta-data on a default index")
+
+ if engine != "pyarrow":
+ raise ValueError("engine must be 'pyarrow'")
+ engine = import_optional_dependency(engine, min_version="10.0.1")
+ pa = import_optional_dependency("pyarrow")
+ orc = import_optional_dependency("pyarrow.orc")
+
+ was_none = path is None
+ if was_none:
+ path = io.BytesIO()
+ assert path is not None # For mypy
+ with get_handle(path, "wb", is_text=False) as handles:
+ assert isinstance(engine, ModuleType) # For mypy
+ try:
+ orc.write_table(
+ engine.Table.from_pandas(df, preserve_index=index),
+ handles.handle,
+ **engine_kwargs,
+ )
+ except (TypeError, pa.ArrowNotImplementedError) as e:
+ raise NotImplementedError(
+ "The dtype of one or more columns is not supported yet."
+ ) from e
+
+ if was_none:
+ assert isinstance(path, io.BytesIO) # For mypy
+ return path.getvalue()
+ return None
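A minimal round-trip sketch of the two functions above, assuming ``pyarrow`` (with its ORC component) is installed; the sample frame is invented.

```python
import io

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# With path=None, to_orc() serializes to bytes; the index must be a
# default RangeIndex, as validated above.
orc_bytes = df.to_orc()

restored = pd.read_orc(io.BytesIO(orc_bytes))
print(restored.equals(df))  # expected: True
```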
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parquet.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..9570d6f8b26bd85585e5a46145b7871f1bb6eb3a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parquet.py
@@ -0,0 +1,676 @@
+""" parquet compat """
+from __future__ import annotations
+
+import io
+import json
+import os
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Literal,
+)
+import warnings
+from warnings import catch_warnings
+
+from pandas._config import using_pyarrow_string_dtype
+from pandas._config.config import _get_option
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ get_option,
+)
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io._util import arrow_string_types_mapper
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ is_fsspec_url,
+ is_url,
+ stringify_path,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ DtypeBackend,
+ FilePath,
+ ReadBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+
+def get_engine(engine: str) -> BaseImpl:
+ """return our implementation"""
+ if engine == "auto":
+ engine = get_option("io.parquet.engine")
+
+ if engine == "auto":
+ # try engines in this order
+ engine_classes = [PyArrowImpl, FastParquetImpl]
+
+ error_msgs = ""
+ for engine_class in engine_classes:
+ try:
+ return engine_class()
+ except ImportError as err:
+ error_msgs += "\n - " + str(err)
+
+ raise ImportError(
+ "Unable to find a usable engine; "
+ "tried using: 'pyarrow', 'fastparquet'.\n"
+ "A suitable version of "
+ "pyarrow or fastparquet is required for parquet "
+ "support.\n"
+ "Trying to import the above resulted in these errors:"
+ f"{error_msgs}"
+ )
+
+ if engine == "pyarrow":
+ return PyArrowImpl()
+ elif engine == "fastparquet":
+ return FastParquetImpl()
+
+ raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
+
+
+def _get_path_or_handle(
+ path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
+ fs: Any,
+ storage_options: StorageOptions | None = None,
+ mode: str = "rb",
+ is_dir: bool = False,
+) -> tuple[
+ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
+]:
+ """File handling for PyArrow."""
+ path_or_handle = stringify_path(path)
+ if fs is not None:
+ pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore")
+ fsspec = import_optional_dependency("fsspec", errors="ignore")
+ if pa_fs is not None and isinstance(fs, pa_fs.FileSystem):
+ if storage_options:
+ raise NotImplementedError(
+ "storage_options not supported with a pyarrow FileSystem."
+ )
+ elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem):
+ pass
+ else:
+ raise ValueError(
+ f"filesystem must be a pyarrow or fsspec FileSystem, "
+ f"not a {type(fs).__name__}"
+ )
+ if is_fsspec_url(path_or_handle) and fs is None:
+ if storage_options is None:
+ pa = import_optional_dependency("pyarrow")
+ pa_fs = import_optional_dependency("pyarrow.fs")
+
+ try:
+ fs, path_or_handle = pa_fs.FileSystem.from_uri(path)
+ except (TypeError, pa.ArrowInvalid):
+ pass
+ if fs is None:
+ fsspec = import_optional_dependency("fsspec")
+ fs, path_or_handle = fsspec.core.url_to_fs(
+ path_or_handle, **(storage_options or {})
+ )
+ elif storage_options and (not is_url(path_or_handle) or mode != "rb"):
+ # can't write to a remote url
+ # without making use of fsspec at the moment
+ raise ValueError("storage_options passed with buffer, or non-supported URL")
+
+ handles = None
+ if (
+ not fs
+ and not is_dir
+ and isinstance(path_or_handle, str)
+ and not os.path.isdir(path_or_handle)
+ ):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(
+ path_or_handle, mode, is_text=False, storage_options=storage_options
+ )
+ fs = None
+ path_or_handle = handles.handle
+ return path_or_handle, handles, fs
+
+
+class BaseImpl:
+ @staticmethod
+ def validate_dataframe(df: DataFrame) -> None:
+ if not isinstance(df, DataFrame):
+ raise ValueError("to_parquet only supports IO with DataFrames")
+
+ def write(self, df: DataFrame, path, compression, **kwargs):
+ raise AbstractMethodError(self)
+
+ def read(self, path, columns=None, **kwargs) -> DataFrame:
+ raise AbstractMethodError(self)
+
+
+class PyArrowImpl(BaseImpl):
+ def __init__(self) -> None:
+ import_optional_dependency(
+ "pyarrow", extra="pyarrow is required for parquet support."
+ )
+ import pyarrow.parquet
+
+ # import utils to register the pyarrow extension types
+ import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401
+
+ self.api = pyarrow
+
+ def write(
+ self,
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes],
+ compression: str | None = "snappy",
+ index: bool | None = None,
+ storage_options: StorageOptions | None = None,
+ partition_cols: list[str] | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> None:
+ self.validate_dataframe(df)
+
+ from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}
+ if index is not None:
+ from_pandas_kwargs["preserve_index"] = index
+
+ table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
+
+ if df.attrs:
+ df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)}
+ existing_metadata = table.schema.metadata
+ merged_metadata = {**existing_metadata, **df_metadata}
+ table = table.replace_schema_metadata(merged_metadata)
+
+ path_or_handle, handles, filesystem = _get_path_or_handle(
+ path,
+ filesystem,
+ storage_options=storage_options,
+ mode="wb",
+ is_dir=partition_cols is not None,
+ )
+ if (
+ isinstance(path_or_handle, io.BufferedWriter)
+ and hasattr(path_or_handle, "name")
+ and isinstance(path_or_handle.name, (str, bytes))
+ ):
+ if isinstance(path_or_handle.name, bytes):
+ path_or_handle = path_or_handle.name.decode()
+ else:
+ path_or_handle = path_or_handle.name
+
+ try:
+ if partition_cols is not None:
+ # writes to multiple files under the given path
+ self.api.parquet.write_to_dataset(
+ table,
+ path_or_handle,
+ compression=compression,
+ partition_cols=partition_cols,
+ filesystem=filesystem,
+ **kwargs,
+ )
+ else:
+ # write to single output file
+ self.api.parquet.write_table(
+ table,
+ path_or_handle,
+ compression=compression,
+ filesystem=filesystem,
+ **kwargs,
+ )
+ finally:
+ if handles is not None:
+ handles.close()
+
+ def read(
+ self,
+ path,
+ columns=None,
+ filters=None,
+ use_nullable_dtypes: bool = False,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> DataFrame:
+ kwargs["use_pandas_metadata"] = True
+
+ to_pandas_kwargs = {}
+ if dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ mapping = _arrow_dtype_mapping()
+ to_pandas_kwargs["types_mapper"] = mapping.get
+ elif dtype_backend == "pyarrow":
+ to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment]
+ elif using_pyarrow_string_dtype():
+ to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper()
+
+ manager = _get_option("mode.data_manager", silent=True)
+ if manager == "array":
+ to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment]
+
+ path_or_handle, handles, filesystem = _get_path_or_handle(
+ path,
+ filesystem,
+ storage_options=storage_options,
+ mode="rb",
+ )
+ try:
+ pa_table = self.api.parquet.read_table(
+ path_or_handle,
+ columns=columns,
+ filesystem=filesystem,
+ filters=filters,
+ **kwargs,
+ )
+ result = pa_table.to_pandas(**to_pandas_kwargs)
+
+ if manager == "array":
+ result = result._as_manager("array", copy=False)
+
+ if pa_table.schema.metadata:
+ if b"PANDAS_ATTRS" in pa_table.schema.metadata:
+ df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"]
+ result.attrs = json.loads(df_metadata)
+ return result
+ finally:
+ if handles is not None:
+ handles.close()
+
+
+class FastParquetImpl(BaseImpl):
+ def __init__(self) -> None:
+ # since pandas is a dependency of fastparquet
+ # we need to import on first use
+ fastparquet = import_optional_dependency(
+ "fastparquet", extra="fastparquet is required for parquet support."
+ )
+ self.api = fastparquet
+
+ def write(
+ self,
+ df: DataFrame,
+ path,
+ compression: Literal["snappy", "gzip", "brotli"] | None = "snappy",
+ index=None,
+ partition_cols=None,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> None:
+ self.validate_dataframe(df)
+
+ if "partition_on" in kwargs and partition_cols is not None:
+ raise ValueError(
+ "Cannot use both partition_on and "
+ "partition_cols. Use partition_cols for partitioning data"
+ )
+ if "partition_on" in kwargs:
+ partition_cols = kwargs.pop("partition_on")
+
+ if partition_cols is not None:
+ kwargs["file_scheme"] = "hive"
+
+ if filesystem is not None:
+ raise NotImplementedError(
+ "filesystem is not implemented for the fastparquet engine."
+ )
+
+ # cannot use get_handle as write() does not accept file buffers
+ path = stringify_path(path)
+ if is_fsspec_url(path):
+ fsspec = import_optional_dependency("fsspec")
+
+ # if filesystem is provided by fsspec, file must be opened in 'wb' mode.
+ kwargs["open_with"] = lambda path, _: fsspec.open(
+ path, "wb", **(storage_options or {})
+ ).open()
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
+
+ with catch_warnings(record=True):
+ self.api.write(
+ path,
+ df,
+ compression=compression,
+ write_index=index,
+ partition_on=partition_cols,
+ **kwargs,
+ )
+
+ def read(
+ self,
+ path,
+ columns=None,
+ filters=None,
+ storage_options: StorageOptions | None = None,
+ filesystem=None,
+ **kwargs,
+ ) -> DataFrame:
+ parquet_kwargs: dict[str, Any] = {}
+ use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False)
+ dtype_backend = kwargs.pop("dtype_backend", lib.no_default)
+ # We are disabling nullable dtypes for fastparquet pending discussion
+ parquet_kwargs["pandas_nulls"] = False
+ if use_nullable_dtypes:
+ raise ValueError(
+ "The 'use_nullable_dtypes' argument is not supported for the "
+ "fastparquet engine"
+ )
+ if dtype_backend is not lib.no_default:
+ raise ValueError(
+ "The 'dtype_backend' argument is not supported for the "
+ "fastparquet engine"
+ )
+ if filesystem is not None:
+ raise NotImplementedError(
+ "filesystem is not implemented for the fastparquet engine."
+ )
+ path = stringify_path(path)
+ handles = None
+ if is_fsspec_url(path):
+ fsspec = import_optional_dependency("fsspec")
+
+ parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs
+ elif isinstance(path, str) and not os.path.isdir(path):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(
+ path, "rb", is_text=False, storage_options=storage_options
+ )
+ path = handles.handle
+
+ try:
+ parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
+ return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs)
+ finally:
+ if handles is not None:
+ handles.close()
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def to_parquet(
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes] | None = None,
+ engine: str = "auto",
+ compression: str | None = "snappy",
+ index: bool | None = None,
+ storage_options: StorageOptions | None = None,
+ partition_cols: list[str] | None = None,
+ filesystem: Any = None,
+ **kwargs,
+) -> bytes | None:
+ """
+ Write a DataFrame to the parquet format.
+
+ Parameters
+ ----------
+ df : DataFrame
+ path : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function. If None, the result is
+ returned as bytes. If a string, it will be used as Root Directory path
+ when writing a partitioned dataset. The engine fastparquet does not
+ accept file-like objects.
+ engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
+
+ When using the ``'pyarrow'`` engine and no storage options are provided
+ and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
+ (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
+ Use the filesystem keyword with an instantiated fsspec filesystem
+ if you wish to use its implementation.
+ compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},
+ default 'snappy'. Name of the compression to use. Use ``None``
+ for no compression.
+ index : bool, default None
+ If ``True``, include the dataframe's index(es) in the file output. If
+ ``False``, they will not be written to the file.
+ If ``None``, similar to ``True`` the dataframe's index(es)
+ will be saved. However, instead of being saved as values,
+ the RangeIndex will be stored as a range in the metadata so it
+ doesn't require much space and is faster. Other indexes will
+ be included as columns in the file output.
+ partition_cols : str or list, optional, default None
+ Column names by which to partition the dataset.
+ Columns are partitioned in the order they are given.
+ Must be None if path is not a string.
+ {storage_options}
+
+ filesystem : fsspec or pyarrow filesystem, default None
+ Filesystem object to use when reading the parquet file. Only implemented
+ for ``engine="pyarrow"``.
+
+ .. versionadded:: 2.1.0
+
+ kwargs
+ Additional keyword arguments passed to the engine
+
+ Returns
+ -------
+ bytes if no path argument is provided else None
+ """
+ if isinstance(partition_cols, str):
+ partition_cols = [partition_cols]
+ impl = get_engine(engine)
+
+ path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
+
+ impl.write(
+ df,
+ path_or_buf,
+ compression=compression,
+ index=index,
+ partition_cols=partition_cols,
+ storage_options=storage_options,
+ filesystem=filesystem,
+ **kwargs,
+ )
+
+ if path is None:
+ assert isinstance(path_or_buf, io.BytesIO)
+ return path_or_buf.getvalue()
+ else:
+ return None
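A hedged sketch of the partitioned-write path (the ``partition_cols is not None`` branch in ``PyArrowImpl.write``). The output directory name ``out_dir`` is arbitrary and ``pyarrow`` is assumed to be installed.

```python
import pandas as pd

df = pd.DataFrame({"year": [2022, 2022, 2023], "value": [1.0, 2.0, 3.0]})

# partition_cols routes through pyarrow.parquet.write_to_dataset(),
# producing a hive-style layout such as out_dir/year=2022/<part>.parquet.
df.to_parquet("out_dir", partition_cols=["year"], index=False)

# Reading the directory back reassembles the partitions; the partition
# column typically comes back as a categorical.
print(pd.read_parquet("out_dir").sort_values("value"))
```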
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def read_parquet(
+ path: FilePath | ReadBuffer[bytes],
+ engine: str = "auto",
+ columns: list[str] | None = None,
+ storage_options: StorageOptions | None = None,
+ use_nullable_dtypes: bool | lib.NoDefault = lib.no_default,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ filesystem: Any = None,
+ filters: list[tuple] | list[list[tuple]] | None = None,
+ **kwargs,
+) -> DataFrame:
+ """
+ Load a parquet object from the file path, returning a DataFrame.
+
+ Parameters
+ ----------
+ path : str, path object or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function.
+ The string could be a URL. Valid URL schemes include http, ftp, s3,
+ gs, and file. For file URLs, a host is expected. A local file could be:
+ ``file://localhost/path/to/table.parquet``.
+ A file URL can also be a path to a directory that contains multiple
+ partitioned parquet files. Both pyarrow and fastparquet support
+ paths to directories as well as file URLs. A directory path could be:
+ ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
+ engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
+
+ When using the ``'pyarrow'`` engine and no storage options are provided
+ and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
+ (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
+ Use the filesystem keyword with an instantiated fsspec filesystem
+ if you wish to use its implementation.
+ columns : list, default=None
+ If not None, only these columns will be read from the file.
+ {storage_options}
+
+ .. versionadded:: 1.3.0
+
+ use_nullable_dtypes : bool, default False
+ If True, use dtypes that use ``pd.NA`` as missing value indicator
+ for the resulting DataFrame. (only applicable for the ``pyarrow``
+ engine)
+ As new dtypes are added that support ``pd.NA`` in the future, the
+ output with this option will change to use those dtypes.
+ Note: this is an experimental option, and behaviour (e.g. additional
+ support dtypes) may change without notice.
+
+ .. deprecated:: 2.0
+
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ filesystem : fsspec or pyarrow filesystem, default None
+ Filesystem object to use when reading the parquet file. Only implemented
+ for ``engine="pyarrow"``.
+
+ .. versionadded:: 2.1.0
+
+ filters : List[Tuple] or List[List[Tuple]], default None
+ To filter out data.
+ Filter syntax: [[(column, op, val), ...],...]
+ where op is [==, =, >, >=, <, <=, !=, in, not in]
+ The innermost tuples are transposed into a set of filters applied
+ through an `AND` operation.
+ The outer list combines these sets of filters through an `OR`
+ operation.
+ A single list of tuples can also be used, meaning that no `OR`
+ operation between set of filters is to be conducted.
+
+ Using this argument will NOT result in row-wise filtering of the final
+ partitions unless ``engine="pyarrow"`` is also specified. For
+ other engines, filtering is only performed at the partition level, that is,
+ to prevent the loading of some row-groups and/or files.
+
+ .. versionadded:: 2.1.0
+
+ **kwargs
+ Any additional kwargs are passed to the engine.
+
+ Returns
+ -------
+ DataFrame
+
+ See Also
+ --------
+ DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... )
+ >>> original_df
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> df_parquet_bytes = original_df.to_parquet()
+ >>> from io import BytesIO
+ >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
+ >>> restored_df
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> restored_df.equals(original_df)
+ True
+ >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
+ >>> restored_bar
+ bar
+ 0 5
+ 1 6
+ 2 7
+ 3 8
+ 4 9
+ >>> restored_bar.equals(original_df[['bar']])
+ True
+
+ The function uses `kwargs` that are passed directly to the engine.
+ In the following example, we use the `filters` argument of the pyarrow
+ engine to filter the rows of the DataFrame.
+
+ Since `pyarrow` is the default engine, we can omit the `engine` argument.
+ Note that the `filters` argument is implemented by the `pyarrow` engine,
+ which can benefit from multithreading and also potentially be more
+ economical in terms of memory.
+
+ >>> sel = [("foo", ">", 2)]
+ >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
+ >>> restored_part
+ foo bar
+ 0 3 8
+ 1 4 9
+ """
+
+ impl = get_engine(engine)
+
+ if use_nullable_dtypes is not lib.no_default:
+ msg = (
+ "The argument 'use_nullable_dtypes' is deprecated and will be removed "
+ "in a future version."
+ )
+ if use_nullable_dtypes is True:
+ msg += (
+ "Use dtype_backend='numpy_nullable' instead of use_nullable_dtype=True."
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+ else:
+ use_nullable_dtypes = False
+ check_dtype_backend(dtype_backend)
+
+ return impl.read(
+ path,
+ columns=columns,
+ filters=filters,
+ storage_options=storage_options,
+ use_nullable_dtypes=use_nullable_dtypes,
+ dtype_backend=dtype_backend,
+ filesystem=filesystem,
+ **kwargs,
+ )
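A brief sketch of the ``dtype_backend`` keyword documented above, assuming ``pyarrow`` is installed; the sample frame is invented.

```python
import io

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", None, "z"]})
parquet_bytes = df.to_parquet()  # path=None returns bytes

# Default backend: plain NumPy dtypes (int64 / object).
print(pd.read_parquet(io.BytesIO(parquet_bytes)).dtypes)

# numpy_nullable routes through _arrow_dtype_mapping(), yielding masked
# extension dtypes such as Int64 and the nullable string dtype.
print(pd.read_parquet(io.BytesIO(parquet_bytes), dtype_backend="numpy_nullable").dtypes)
```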
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff11968db15f0f7c6057a46c252a91daee7b9cd9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__init__.py
@@ -0,0 +1,9 @@
+from pandas.io.parsers.readers import (
+ TextFileReader,
+ TextParser,
+ read_csv,
+ read_fwf,
+ read_table,
+)
+
+__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02bb3ef7c2aac14f7399fdfa54ee0b06a1185c3d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a48d8df05dad6c6a7c86747a027c5b04683d1b5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d9568041f9ea91144543b322a0788b472328e9a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5e81f00cb147b9a3b78e2b571e6d94b354090a0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f083161cf5ff529f26a270090023241151327f1a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c021d896e60b2e63dc8ed95f651a89385ecea1b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..890b22154648e6b12d636c5df3595d105ff02ac9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
@@ -0,0 +1,303 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.inference import is_integer
+
+import pandas as pd
+from pandas import DataFrame
+
+from pandas.io._util import (
+ _arrow_dtype_mapping,
+ arrow_string_types_mapper,
+)
+from pandas.io.parsers.base_parser import ParserBase
+
+if TYPE_CHECKING:
+ from pandas._typing import ReadBuffer
+
+
+class ArrowParserWrapper(ParserBase):
+ """
+ Wrapper for the pyarrow engine for read_csv()
+ """
+
+ def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:
+ super().__init__(kwds)
+ self.kwds = kwds
+ self.src = src
+
+ self._parse_kwds()
+
+ def _parse_kwds(self) -> None:
+ """
+ Validates keywords before passing to pyarrow.
+ """
+ encoding: str | None = self.kwds.get("encoding")
+ self.encoding = "utf-8" if encoding is None else encoding
+
+ na_values = self.kwds["na_values"]
+ if isinstance(na_values, dict):
+ raise ValueError(
+ "The pyarrow engine doesn't support passing a dict for na_values"
+ )
+ self.na_values = list(self.kwds["na_values"])
+
+ def _get_pyarrow_options(self) -> None:
+ """
+ Rename some arguments to pass to pyarrow
+ """
+ mapping = {
+ "usecols": "include_columns",
+ "na_values": "null_values",
+ "escapechar": "escape_char",
+ "skip_blank_lines": "ignore_empty_lines",
+ "decimal": "decimal_point",
+ "quotechar": "quote_char",
+ }
+ for pandas_name, pyarrow_name in mapping.items():
+ if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
+ self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
+
+ # Date format handling
+ # If we get a string, we need to convert it into a list for pyarrow
+ # If we get a dict, we want to parse those separately
+ date_format = self.date_format
+ if isinstance(date_format, str):
+ date_format = [date_format]
+ else:
+ # In case of dict, we don't want to propagate through, so
+ # just set to pyarrow default of None
+
+ # Ideally, in future we disable pyarrow dtype inference (read in as string)
+ # to prevent misreads.
+ date_format = None
+ self.kwds["timestamp_parsers"] = date_format
+
+ self.parse_options = {
+ option_name: option_value
+ for option_name, option_value in self.kwds.items()
+ if option_value is not None
+ and option_name
+ in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
+ }
+
+ on_bad_lines = self.kwds.get("on_bad_lines")
+ if on_bad_lines is not None:
+ if callable(on_bad_lines):
+ self.parse_options["invalid_row_handler"] = on_bad_lines
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:
+ self.parse_options[
+ "invalid_row_handler"
+ ] = None # PyArrow raises an exception by default
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:
+
+ def handle_warning(invalid_row) -> str:
+ warnings.warn(
+ f"Expected {invalid_row.expected_columns} columns, but found "
+ f"{invalid_row.actual_columns}: {invalid_row.text}",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+ return "skip"
+
+ self.parse_options["invalid_row_handler"] = handle_warning
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP:
+ self.parse_options["invalid_row_handler"] = lambda _: "skip"
+
+ self.convert_options = {
+ option_name: option_value
+ for option_name, option_value in self.kwds.items()
+ if option_value is not None
+ and option_name
+ in (
+ "include_columns",
+ "null_values",
+ "true_values",
+ "false_values",
+ "decimal_point",
+ "timestamp_parsers",
+ )
+ }
+ self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"]
+ # autogenerated column names are prefixed with 'f' in pyarrow.csv
+ if self.header is None and "include_columns" in self.convert_options:
+ self.convert_options["include_columns"] = [
+ f"f{n}" for n in self.convert_options["include_columns"]
+ ]
+
+ self.read_options = {
+ "autogenerate_column_names": self.header is None,
+ "skip_rows": self.header
+ if self.header is not None
+ else self.kwds["skiprows"],
+ "encoding": self.encoding,
+ }
+
+ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
+ """
+ Processes data read in based on kwargs.
+
+ Parameters
+ ----------
+ frame: DataFrame
+ The DataFrame to process.
+
+ Returns
+ -------
+ DataFrame
+ The processed DataFrame.
+ """
+ num_cols = len(frame.columns)
+ multi_index_named = True
+ if self.header is None:
+ if self.names is None:
+ if self.header is None:
+ self.names = range(num_cols)
+ if len(self.names) != num_cols:
+ # usecols is passed through to pyarrow, we only handle index col here
+ # The only way self.names is not the same length as number of cols is
+ # if we have int index_col. We should just pad the names (they will get
+ # removed anyway) to the expected length.
+ self.names = list(range(num_cols - len(self.names))) + self.names
+ multi_index_named = False
+ frame.columns = self.names
+ # we only need the frame not the names
+ _, frame = self._do_date_conversions(frame.columns, frame)
+ if self.index_col is not None:
+ index_to_set = self.index_col.copy()
+ for i, item in enumerate(self.index_col):
+ if is_integer(item):
+ index_to_set[i] = frame.columns[item]
+ # String case
+ elif item not in frame.columns:
+ raise ValueError(f"Index {item} invalid")
+
+ # Process dtype for index_col and drop from dtypes
+ if self.dtype is not None:
+ key, new_dtype = (
+ (item, self.dtype.get(item))
+ if self.dtype.get(item) is not None
+ else (frame.columns[item], self.dtype.get(frame.columns[item]))
+ )
+ if new_dtype is not None:
+ frame[key] = frame[key].astype(new_dtype)
+ del self.dtype[key]
+
+ frame.set_index(index_to_set, drop=True, inplace=True)
+ # Clear names if headerless and no name given
+ if self.header is None and not multi_index_named:
+ frame.index.names = [None] * len(frame.index.names)
+
+ if self.dtype is not None:
+ # Ignore non-existent columns from dtype mapping
+ # like other parsers do
+ if isinstance(self.dtype, dict):
+ self.dtype = {
+ k: pandas_dtype(v)
+ for k, v in self.dtype.items()
+ if k in frame.columns
+ }
+ else:
+ self.dtype = pandas_dtype(self.dtype)
+ try:
+ frame = frame.astype(self.dtype)
+ except TypeError as e:
+ # GH#44901 reraise to keep api consistent
+ raise ValueError(e)
+ return frame
+
+ def _validate_usecols(self, usecols) -> None:
+ if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols):
+ raise ValueError(
+ "The pyarrow engine does not allow 'usecols' to be integer "
+ "column positions. Pass a list of string column names instead."
+ )
+ elif callable(usecols):
+ raise ValueError(
+ "The pyarrow engine does not allow 'usecols' to be a callable."
+ )
+
+ def read(self) -> DataFrame:
+ """
+ Reads the contents of a CSV file into a DataFrame and
+ processes it according to the kwargs passed in the
+ constructor.
+
+ Returns
+ -------
+ DataFrame
+ The DataFrame created from the CSV file.
+ """
+ pa = import_optional_dependency("pyarrow")
+ pyarrow_csv = import_optional_dependency("pyarrow.csv")
+ self._get_pyarrow_options()
+
+ try:
+ convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)
+ except TypeError:
+ include = self.convert_options.get("include_columns", None)
+ if include is not None:
+ self._validate_usecols(include)
+
+ nulls = self.convert_options.get("null_values", set())
+ if not lib.is_list_like(nulls) or not all(
+ isinstance(x, str) for x in nulls
+ ):
+ raise TypeError(
+ "The 'pyarrow' engine requires all na_values to be strings"
+ )
+
+ raise
+
+ try:
+ table = pyarrow_csv.read_csv(
+ self.src,
+ read_options=pyarrow_csv.ReadOptions(**self.read_options),
+ parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
+ convert_options=convert_options,
+ )
+ except pa.ArrowInvalid as e:
+ raise ParserError(e) from e
+
+ dtype_backend = self.kwds["dtype_backend"]
+
+ # Convert all pa.null() cols -> float64 (non nullable)
+ # else Int64 (nullable case, see below)
+ if dtype_backend is lib.no_default:
+ new_schema = table.schema
+ new_type = pa.float64()
+ for i, arrow_type in enumerate(table.schema.types):
+ if pa.types.is_null(arrow_type):
+ new_schema = new_schema.set(
+ i, new_schema.field(i).with_type(new_type)
+ )
+
+ table = table.cast(new_schema)
+
+ if dtype_backend == "pyarrow":
+ frame = table.to_pandas(types_mapper=pd.ArrowDtype)
+ elif dtype_backend == "numpy_nullable":
+ # Modify the default mapping to also
+ # map null to Int64 (to match other engines)
+ dtype_mapping = _arrow_dtype_mapping()
+ dtype_mapping[pa.null()] = pd.Int64Dtype()
+ frame = table.to_pandas(types_mapper=dtype_mapping.get)
+ elif using_pyarrow_string_dtype():
+ frame = table.to_pandas(types_mapper=arrow_string_types_mapper())
+
+ else:
+ frame = table.to_pandas()
+ return self._finalize_pandas_output(frame)
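A short usage sketch of reaching this wrapper from the public API; it requires ``pyarrow`` and uses an invented CSV payload.

```python
import io

import pandas as pd

csv_data = "a,b\n1,x\n2,y\n,z\n"

# engine="pyarrow" dispatches to ArrowParserWrapper; combined with
# dtype_backend="pyarrow" the resulting frame keeps Arrow-backed dtypes.
df = pd.read_csv(
    io.StringIO(csv_data),
    engine="pyarrow",
    dtype_backend="pyarrow",
)
print(df.dtypes)  # Arrow-backed dtypes, e.g. int64[pyarrow] / string[pyarrow]
```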
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..09f0f2af8e5c6b55bff173ff74cd290fdf61cbae
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py
@@ -0,0 +1,1448 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from copy import copy
+import csv
+import datetime
+from enum import Enum
+import itertools
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ cast,
+ final,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+ lib,
+ parsers,
+)
+import pandas._libs.ops as libops
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas._libs.tslibs import parsing
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.astype import astype_array
+from pandas.core.dtypes.common import (
+ ensure_object,
+ is_bool_dtype,
+ is_dict_like,
+ is_extension_array_dtype,
+ is_float_dtype,
+ is_integer,
+ is_integer_dtype,
+ is_list_like,
+ is_object_dtype,
+ is_scalar,
+ is_string_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ ExtensionDtype,
+)
+from pandas.core.dtypes.missing import isna
+
+from pandas import (
+ ArrowDtype,
+ DataFrame,
+ DatetimeIndex,
+ StringDtype,
+ concat,
+)
+from pandas.core import algorithms
+from pandas.core.arrays import (
+ ArrowExtensionArray,
+ BaseMaskedArray,
+ BooleanArray,
+ Categorical,
+ ExtensionArray,
+ FloatingArray,
+ IntegerArray,
+)
+from pandas.core.arrays.boolean import BooleanDtype
+from pandas.core.indexes.api import (
+ Index,
+ MultiIndex,
+ default_index,
+ ensure_index_from_sequences,
+)
+from pandas.core.series import Series
+from pandas.core.tools import datetimes as tools
+
+from pandas.io.common import is_potential_multi_index
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ ArrayLike,
+ DtypeArg,
+ DtypeObj,
+ Scalar,
+ )
+
+
+class ParserBase:
+ class BadLineHandleMethod(Enum):
+ ERROR = 0
+ WARN = 1
+ SKIP = 2
+
+ _implicit_index: bool
+ _first_chunk: bool
+ keep_default_na: bool
+ dayfirst: bool
+ cache_dates: bool
+ keep_date_col: bool
+ usecols_dtype: str | None
+
+ def __init__(self, kwds) -> None:
+ self._implicit_index = False
+
+ self.names = kwds.get("names")
+ self.orig_names: Sequence[Hashable] | None = None
+
+ self.index_col = kwds.get("index_col", None)
+ self.unnamed_cols: set = set()
+ self.index_names: Sequence[Hashable] | None = None
+ self.col_names: Sequence[Hashable] | None = None
+
+ self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
+ self._parse_date_cols: Iterable = []
+ self.date_parser = kwds.pop("date_parser", lib.no_default)
+ self.date_format = kwds.pop("date_format", None)
+ self.dayfirst = kwds.pop("dayfirst", False)
+ self.keep_date_col = kwds.pop("keep_date_col", False)
+
+ self.na_values = kwds.get("na_values")
+ self.na_fvalues = kwds.get("na_fvalues")
+ self.na_filter = kwds.get("na_filter", False)
+ self.keep_default_na = kwds.get("keep_default_na", True)
+
+ self.dtype = copy(kwds.get("dtype", None))
+ self.converters = kwds.get("converters")
+ self.dtype_backend = kwds.get("dtype_backend")
+
+ self.true_values = kwds.get("true_values")
+ self.false_values = kwds.get("false_values")
+ self.cache_dates = kwds.pop("cache_dates", True)
+
+ self._date_conv = _make_date_converter(
+ date_parser=self.date_parser,
+ date_format=self.date_format,
+ dayfirst=self.dayfirst,
+ cache_dates=self.cache_dates,
+ )
+
+ # validate header options for mi
+ self.header = kwds.get("header")
+ if is_list_like(self.header, allow_sets=False):
+ if kwds.get("usecols"):
+ raise ValueError(
+ "cannot specify usecols when specifying a multi-index header"
+ )
+ if kwds.get("names"):
+ raise ValueError(
+ "cannot specify names when specifying a multi-index header"
+ )
+
+ # validate index_col that only contains integers
+ if self.index_col is not None:
+ # In this case we can pin down index_col as list[int]
+ if is_integer(self.index_col):
+ self.index_col = [self.index_col]
+ elif not (
+ is_list_like(self.index_col, allow_sets=False)
+ and all(map(is_integer, self.index_col))
+ ):
+ raise ValueError(
+ "index_col must only contain row numbers "
+ "when specifying a multi-index header"
+ )
+ else:
+ self.index_col = list(self.index_col)
+
+ self._name_processed = False
+
+ self._first_chunk = True
+
+ self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])
+
+ # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns)
+ # Normally, this arg would get pre-processed earlier on
+ self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
+
+ def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable:
+ """
+ Check if parse_dates are in columns.
+
+ If user has provided names for parse_dates, check if those columns
+ are available.
+
+ Parameters
+ ----------
+ columns : list
+ List of names of the dataframe.
+
+ Returns
+ -------
+ The names of the columns which will get parsed later if a dict or list
+ is given as specification.
+
+ Raises
+ ------
+ ValueError
+ If column to parse_date is not in dataframe.
+
+ """
+ cols_needed: Iterable
+ if is_dict_like(self.parse_dates):
+ cols_needed = itertools.chain(*self.parse_dates.values())
+ elif is_list_like(self.parse_dates):
+ # a column in parse_dates could be represented
+ # ColReference = Union[int, str]
+ # DateGroups = List[ColReference]
+ # ParseDates = Union[DateGroups, List[DateGroups],
+ # Dict[ColReference, DateGroups]]
+ cols_needed = itertools.chain.from_iterable(
+ col if is_list_like(col) and not isinstance(col, tuple) else [col]
+ for col in self.parse_dates
+ )
+ else:
+ cols_needed = []
+
+ cols_needed = list(cols_needed)
+
+ # get only columns that are references using names (str), not by index
+ missing_cols = ", ".join(
+ sorted(
+ {
+ col
+ for col in cols_needed
+ if isinstance(col, str) and col not in columns
+ }
+ )
+ )
+ if missing_cols:
+ raise ValueError(
+ f"Missing column provided to 'parse_dates': '{missing_cols}'"
+ )
+ # Convert positions to actual column names
+ return [
+ col if (isinstance(col, str) or col in columns) else columns[col]
+ for col in cols_needed
+ ]
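+
+ # Editorial note (illustrative, not part of the upstream source): with
+ # columns ["a", "b", "c"], parse_dates=[[0, 2]] resolves to ["a", "c"] and
+ # parse_dates={"stamp": ["a", "b"]} resolves to ["a", "b"]; a name missing
+ # from columns, e.g. parse_dates=["d"], raises the ValueError above.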
+
+ def close(self) -> None:
+ pass
+
+ @final
+ @property
+ def _has_complex_date_col(self) -> bool:
+ return isinstance(self.parse_dates, dict) or (
+ isinstance(self.parse_dates, list)
+ and len(self.parse_dates) > 0
+ and isinstance(self.parse_dates[0], list)
+ )
+
+ @final
+ def _should_parse_dates(self, i: int) -> bool:
+ if lib.is_bool(self.parse_dates):
+ return bool(self.parse_dates)
+ else:
+ if self.index_names is not None:
+ name = self.index_names[i]
+ else:
+ name = None
+ j = i if self.index_col is None else self.index_col[i]
+
+ return (j in self.parse_dates) or (
+ name is not None and name in self.parse_dates
+ )
+
+ @final
+ def _extract_multi_indexer_columns(
+ self,
+ header,
+ index_names: Sequence[Hashable] | None,
+ passed_names: bool = False,
+ ) -> tuple[
+ Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
+ ]:
+ """
+ Extract and return the names, index_names, col_names if the column
+ names are a MultiIndex.
+
+ Parameters
+ ----------
+ header: list of lists
+ The header rows
+ index_names: list, optional
+ The names of the future index
+ passed_names: bool, default False
+ A flag specifying if names were passed
+
+ """
+ if len(header) < 2:
+ return header[0], index_names, None, passed_names
+
+ # the names are the tuples of the header that are not the index cols
+ # 0 is the name of the index, assuming index_col is a list of column
+ # numbers
+ ic = self.index_col
+ if ic is None:
+ ic = []
+
+ if not isinstance(ic, (list, tuple, np.ndarray)):
+ ic = [ic]
+ sic = set(ic)
+
+ # clean the index_names
+ index_names = header.pop(-1)
+ index_names, _, _ = self._clean_index_names(index_names, self.index_col)
+
+ # extract the columns
+ field_count = len(header[0])
+
+ # check if header lengths are equal
+ if not all(len(header_iter) == field_count for header_iter in header[1:]):
+ raise ParserError("Header rows must have an equal number of columns.")
+
+ def extract(r):
+ return tuple(r[i] for i in range(field_count) if i not in sic)
+
+ columns = list(zip(*(extract(r) for r in header)))
+ names = columns.copy()
+ for single_ic in sorted(ic):
+ names.insert(single_ic, single_ic)
+
+ # Clean the column names (if we have an index_col).
+ if len(ic):
+ col_names = [
+ r[ic[0]]
+ if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
+ else None
+ for r in header
+ ]
+ else:
+ col_names = [None] * len(header)
+
+ passed_names = True
+
+ return names, index_names, col_names, passed_names
+
+ @final
+ def _maybe_make_multi_index_columns(
+ self,
+ columns: Sequence[Hashable],
+ col_names: Sequence[Hashable] | None = None,
+ ) -> Sequence[Hashable] | MultiIndex:
+ # possibly create a column mi here
+ if is_potential_multi_index(columns):
+ list_columns = cast(list[tuple], columns)
+ return MultiIndex.from_tuples(list_columns, names=col_names)
+ return columns
+
+ @final
+ def _make_index(
+ self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
+ ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
+ index: Index | None
+ if not is_index_col(self.index_col) or not self.index_col:
+ index = None
+
+ elif not self._has_complex_date_col:
+ simple_index = self._get_simple_index(alldata, columns)
+ index = self._agg_index(simple_index)
+ elif self._has_complex_date_col:
+ if not self._name_processed:
+ (self.index_names, _, self.index_col) = self._clean_index_names(
+ list(columns), self.index_col
+ )
+ self._name_processed = True
+ date_index = self._get_complex_date_index(data, columns)
+ index = self._agg_index(date_index, try_parse_dates=False)
+
+ # add names for the index
+ if indexnamerow:
+ coffset = len(indexnamerow) - len(columns)
+ assert index is not None
+ index = index.set_names(indexnamerow[:coffset])
+
+ # maybe create a mi on the columns
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ return index, columns
+
+ @final
+ def _get_simple_index(self, data, columns):
+ def ix(col):
+ if not isinstance(col, str):
+ return col
+ raise ValueError(f"Index {col} invalid")
+
+ to_remove = []
+ index = []
+ for idx in self.index_col:
+ i = ix(idx)
+ to_remove.append(i)
+ index.append(data[i])
+
+ # remove index items from content and columns, don't pop in
+ # loop
+ for i in sorted(to_remove, reverse=True):
+ data.pop(i)
+ if not self._implicit_index:
+ columns.pop(i)
+
+ return index
+
+ @final
+ def _get_complex_date_index(self, data, col_names):
+ def _get_name(icol):
+ if isinstance(icol, str):
+ return icol
+
+ if col_names is None:
+ raise ValueError(f"Must supply column order to use {icol!s} as index")
+
+ for i, c in enumerate(col_names):
+ if i == icol:
+ return c
+
+ to_remove = []
+ index = []
+ for idx in self.index_col:
+ name = _get_name(idx)
+ to_remove.append(name)
+ index.append(data[name])
+
+ # remove index items from content and columns, don't pop in
+ # loop
+ for c in sorted(to_remove, reverse=True):
+ data.pop(c)
+ col_names.remove(c)
+
+ return index
+
+ @final
+ def _clean_mapping(self, mapping):
+ """converts col numbers to names"""
+ if not isinstance(mapping, dict):
+ return mapping
+ clean = {}
+ # for mypy
+ assert self.orig_names is not None
+
+ for col, v in mapping.items():
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+ clean[col] = v
+ if isinstance(mapping, defaultdict):
+ remaining_cols = set(self.orig_names) - set(clean.keys())
+ clean.update({col: mapping[col] for col in remaining_cols})
+ return clean
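+
+ # Illustrative sketch (editor's note, not upstream code): with
+ # orig_names == ["a", "b"], a mapping such as {0: float, "b": str} is
+ # cleaned to {"a": float, "b": str}; for a defaultdict input, columns not
+ # listed explicitly fall back to the defaultdict's default factory.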
+
+ @final
+ def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
+ arrays = []
+ converters = self._clean_mapping(self.converters)
+
+ for i, arr in enumerate(index):
+ if try_parse_dates and self._should_parse_dates(i):
+ arr = self._date_conv(
+ arr,
+ col=self.index_names[i] if self.index_names is not None else None,
+ )
+
+ if self.na_filter:
+ col_na_values = self.na_values
+ col_na_fvalues = self.na_fvalues
+ else:
+ col_na_values = set()
+ col_na_fvalues = set()
+
+ if isinstance(self.na_values, dict):
+ assert self.index_names is not None
+ col_name = self.index_names[i]
+ if col_name is not None:
+ col_na_values, col_na_fvalues = _get_na_values(
+ col_name, self.na_values, self.na_fvalues, self.keep_default_na
+ )
+
+ clean_dtypes = self._clean_mapping(self.dtype)
+
+ cast_type = None
+ index_converter = False
+ if self.index_names is not None:
+ if isinstance(clean_dtypes, dict):
+ cast_type = clean_dtypes.get(self.index_names[i], None)
+
+ if isinstance(converters, dict):
+ index_converter = converters.get(self.index_names[i]) is not None
+
+ try_num_bool = not (
+ cast_type and is_string_dtype(cast_type) or index_converter
+ )
+
+ arr, _ = self._infer_types(
+ arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool
+ )
+ arrays.append(arr)
+
+ names = self.index_names
+ index = ensure_index_from_sequences(arrays, names)
+
+ return index
+
+ @final
+ def _convert_to_ndarrays(
+ self,
+ dct: Mapping,
+ na_values,
+ na_fvalues,
+ verbose: bool = False,
+ converters=None,
+ dtypes=None,
+ ):
+ result = {}
+ for c, values in dct.items():
+ conv_f = None if converters is None else converters.get(c, None)
+ if isinstance(dtypes, dict):
+ cast_type = dtypes.get(c, None)
+ else:
+ # single dtype or None
+ cast_type = dtypes
+
+ if self.na_filter:
+ col_na_values, col_na_fvalues = _get_na_values(
+ c, na_values, na_fvalues, self.keep_default_na
+ )
+ else:
+ col_na_values, col_na_fvalues = set(), set()
+
+ if c in self._parse_date_cols:
+ # GH#26203 Do not convert columns which get converted to dates
+ # but replace nans to ensure to_datetime works
+ mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)
+ np.putmask(values, mask, np.nan)
+ result[c] = values
+ continue
+
+ if conv_f is not None:
+ # conv_f applied to data before inference
+ if cast_type is not None:
+ warnings.warn(
+ (
+ "Both a converter and dtype were specified "
+ f"for column {c} - only the converter will be used."
+ ),
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ try:
+ values = lib.map_infer(values, conv_f)
+ except ValueError:
+ mask = algorithms.isin(values, list(na_values)).view(np.uint8)
+ values = lib.map_infer_mask(values, conv_f, mask)
+
+ cvals, na_count = self._infer_types(
+ values,
+ set(col_na_values) | col_na_fvalues,
+ cast_type is None,
+ try_num_bool=False,
+ )
+ else:
+ is_ea = is_extension_array_dtype(cast_type)
+ is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
+ # skip inference if specified dtype is object
+ # or casting to an EA
+ try_num_bool = not (cast_type and is_str_or_ea_dtype)
+
+ # general type inference and conversion
+ cvals, na_count = self._infer_types(
+ values,
+ set(col_na_values) | col_na_fvalues,
+ cast_type is None,
+ try_num_bool,
+ )
+
+ # type specified in dtype param or cast_type is an EA
+ if cast_type is not None:
+ cast_type = pandas_dtype(cast_type)
+ if cast_type and (cvals.dtype != cast_type or is_ea):
+ if not is_ea and na_count > 0:
+ if is_bool_dtype(cast_type):
+ raise ValueError(f"Bool column has NA values in column {c}")
+ cvals = self._cast_types(cvals, cast_type, c)
+
+ result[c] = cvals
+ if verbose and na_count:
+ print(f"Filled {na_count} NA values in column {c!s}")
+ return result
+
+ @final
+ def _set_noconvert_dtype_columns(
+ self, col_indices: list[int], names: Sequence[Hashable]
+ ) -> set[int]:
+ """
+ Set the columns that should not undergo dtype conversions.
+
+ Currently, any column that is involved with date parsing will not
+ undergo such conversions. If usecols is specified, the positions of the
+ columns not to cast are relative to usecols, not to all columns.
+
+ Parameters
+ ----------
+ col_indices: The indices specifying order and positions of the columns
+ names: The column names, whose order corresponds to the order of col_indices
+
+ Returns
+ -------
+ A set of integers containing the positions of the columns not to convert.
+ """
+ usecols: list[int] | list[str] | None
+ noconvert_columns = set()
+ if self.usecols_dtype == "integer":
+ # A set of integers will be converted to a list in
+ # the correct order every single time.
+ usecols = sorted(self.usecols)
+ elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
+ # The names attribute should have the correct columns
+ # in the proper order for indexing with parse_dates.
+ usecols = col_indices
+ else:
+ # Usecols is empty.
+ usecols = None
+
+ def _set(x) -> int:
+ if usecols is not None and is_integer(x):
+ x = usecols[x]
+
+ if not is_integer(x):
+ x = col_indices[names.index(x)]
+
+ return x
+
+ if isinstance(self.parse_dates, list):
+ for val in self.parse_dates:
+ if isinstance(val, list):
+ for k in val:
+ noconvert_columns.add(_set(k))
+ else:
+ noconvert_columns.add(_set(val))
+
+ elif isinstance(self.parse_dates, dict):
+ for val in self.parse_dates.values():
+ if isinstance(val, list):
+ for k in val:
+ noconvert_columns.add(_set(k))
+ else:
+ noconvert_columns.add(_set(val))
+
+ elif self.parse_dates:
+ if isinstance(self.index_col, list):
+ for k in self.index_col:
+ noconvert_columns.add(_set(k))
+ elif self.index_col is not None:
+ noconvert_columns.add(_set(self.index_col))
+
+ return noconvert_columns
+
+ @final
+ def _infer_types(
+ self, values, na_values, no_dtype_specified, try_num_bool: bool = True
+ ) -> tuple[ArrayLike, int]:
+ """
+ Infer types of values, possibly casting
+
+ Parameters
+ ----------
+ values : ndarray
+ na_values : set
+ no_dtype_specified : bool
+ True when no dtype was explicitly specified for this column
+ try_num_bool : bool, default True
+ try to cast values to numeric (first preference) or boolean
+
+ Returns
+ -------
+ converted : ndarray or ExtensionArray
+ na_count : int
+ """
+ na_count = 0
+ if issubclass(values.dtype.type, (np.number, np.bool_)):
+ # If our array has numeric dtype, we don't have to check for strings in isin
+ na_values = np.array([val for val in na_values if not isinstance(val, str)])
+ mask = algorithms.isin(values, na_values)
+ na_count = mask.astype("uint8", copy=False).sum()
+ if na_count > 0:
+ if is_integer_dtype(values):
+ values = values.astype(np.float64)
+ np.putmask(values, mask, np.nan)
+ return values, na_count
+
+ dtype_backend = self.dtype_backend
+ non_default_dtype_backend = (
+ no_dtype_specified and dtype_backend is not lib.no_default
+ )
+ result: ArrayLike
+
+ if try_num_bool and is_object_dtype(values.dtype):
+ # exclude e.g DatetimeIndex here
+ try:
+ result, result_mask = lib.maybe_convert_numeric(
+ values,
+ na_values,
+ False,
+ convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
+ )
+ except (ValueError, TypeError):
+ # e.g. encountering datetime string gets ValueError
+ # TypeError can be raised in floatify
+ na_count = parsers.sanitize_objects(values, na_values)
+ result = values
+ else:
+ if non_default_dtype_backend:
+ if result_mask is None:
+ result_mask = np.zeros(result.shape, dtype=np.bool_)
+
+ if result_mask.all():
+ result = IntegerArray(
+ np.ones(result_mask.shape, dtype=np.int64), result_mask
+ )
+ elif is_integer_dtype(result):
+ result = IntegerArray(result, result_mask)
+ elif is_bool_dtype(result):
+ result = BooleanArray(result, result_mask)
+ elif is_float_dtype(result):
+ result = FloatingArray(result, result_mask)
+
+ na_count = result_mask.sum()
+ else:
+ na_count = isna(result).sum()
+ else:
+ result = values
+ if values.dtype == np.object_:
+ na_count = parsers.sanitize_objects(values, na_values)
+
+ if result.dtype == np.object_ and try_num_bool:
+ result, bool_mask = libops.maybe_convert_bool(
+ np.asarray(values),
+ true_values=self.true_values,
+ false_values=self.false_values,
+ convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
+ )
+ if result.dtype == np.bool_ and non_default_dtype_backend:
+ if bool_mask is None:
+ bool_mask = np.zeros(result.shape, dtype=np.bool_)
+ result = BooleanArray(result, bool_mask)
+ elif result.dtype == np.object_ and non_default_dtype_backend:
+ # read_excel sends array of datetime objects
+ if not lib.is_datetime_array(result, skipna=True):
+ dtype = StringDtype()
+ cls = dtype.construct_array_type()
+ result = cls._from_sequence(values, dtype=dtype)
+
+ if dtype_backend == "pyarrow":
+ pa = import_optional_dependency("pyarrow")
+ if isinstance(result, np.ndarray):
+ result = ArrowExtensionArray(pa.array(result, from_pandas=True))
+ elif isinstance(result, BaseMaskedArray):
+ if result._mask.all():
+ # We want an arrow null array here
+ result = ArrowExtensionArray(pa.array([None] * len(result)))
+ else:
+ result = ArrowExtensionArray(
+ pa.array(result._data, mask=result._mask)
+ )
+ else:
+ result = ArrowExtensionArray(
+ pa.array(result.to_numpy(), from_pandas=True)
+ )
+
+ return result, na_count
+
+ @final
+ def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
+ """
+ Cast values to specified type
+
+ Parameters
+ ----------
+ values : ndarray or ExtensionArray
+ cast_type : np.dtype or ExtensionDtype
+ dtype to cast values to
+ column : string
+ column name - used only for error reporting
+
+ Returns
+ -------
+ converted : ndarray or ExtensionArray
+ """
+ if isinstance(cast_type, CategoricalDtype):
+ known_cats = cast_type.categories is not None
+
+ if not is_object_dtype(values.dtype) and not known_cats:
+ # TODO: this is for consistency with
+ # c-parser which parses all categories
+ # as strings
+ values = lib.ensure_string_array(
+ values, skipna=False, convert_na_value=False
+ )
+
+ cats = Index(values).unique().dropna()
+ values = Categorical._from_inferred_categories(
+ cats, cats.get_indexer(values), cast_type, true_values=self.true_values
+ )
+
+ # use the EA's implementation of casting
+ elif isinstance(cast_type, ExtensionDtype):
+ array_type = cast_type.construct_array_type()
+ try:
+ if isinstance(cast_type, BooleanDtype):
+ # error: Unexpected keyword argument "true_values" for
+ # "_from_sequence_of_strings" of "ExtensionArray"
+ return array_type._from_sequence_of_strings( # type: ignore[call-arg]
+ values,
+ dtype=cast_type,
+ true_values=self.true_values,
+ false_values=self.false_values,
+ )
+ else:
+ return array_type._from_sequence_of_strings(values, dtype=cast_type)
+ except NotImplementedError as err:
+ raise NotImplementedError(
+ f"Extension Array: {array_type} must implement "
+ "_from_sequence_of_strings in order to be used in parser methods"
+ ) from err
+
+ elif isinstance(values, ExtensionArray):
+ values = values.astype(cast_type, copy=False)
+ elif issubclass(cast_type.type, str):
+ # TODO: why skipna=True here and False above? some tests depend
+ # on it here, but nothing fails if we change it above
+ # (as no tests get there as of 2022-12-06)
+ values = lib.ensure_string_array(
+ values, skipna=True, convert_na_value=False
+ )
+ else:
+ try:
+ values = astype_array(values, cast_type, copy=True)
+ except ValueError as err:
+ raise ValueError(
+ f"Unable to convert column {column} to type {cast_type}"
+ ) from err
+ return values
+
+ @overload
+ def _do_date_conversions(
+ self,
+ names: Index,
+ data: DataFrame,
+ ) -> tuple[Sequence[Hashable] | Index, DataFrame]:
+ ...
+
+ @overload
+ def _do_date_conversions(
+ self,
+ names: Sequence[Hashable],
+ data: Mapping[Hashable, ArrayLike],
+ ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:
+ ...
+
+ @final
+ def _do_date_conversions(
+ self,
+ names: Sequence[Hashable] | Index,
+ data: Mapping[Hashable, ArrayLike] | DataFrame,
+ ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]:
+ # returns data, columns
+
+ if self.parse_dates is not None:
+ data, names = _process_date_conversion(
+ data,
+ self._date_conv,
+ self.parse_dates,
+ self.index_col,
+ self.index_names,
+ names,
+ keep_date_col=self.keep_date_col,
+ dtype_backend=self.dtype_backend,
+ )
+
+ return names, data
+
+ @final
+ def _check_data_length(
+ self,
+ columns: Sequence[Hashable],
+ data: Sequence[ArrayLike],
+ ) -> None:
+ """Checks if length of data is equal to length of column names.
+
+ One set of trailing commas is allowed. If self.index_col is not False,
+ a length mismatch has already raised a ParserError earlier in parsing.
+
+ Parameters
+ ----------
+ columns: list of column names
+ data: list of array-likes containing the data column-wise.
+ """
+ if not self.index_col and len(columns) != len(data) and columns:
+ empty_str = is_object_dtype(data[-1]) and data[-1] == ""
+ # error: No overload variant of "__ror__" of "ndarray" matches
+ # argument type "ExtensionArray"
+ empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]
+ if len(columns) == len(data) - 1 and np.all(empty_str_or_na):
+ return
+ warnings.warn(
+ "Length of header or names does not match length of data. This leads "
+ "to a loss of data with index_col=False.",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
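+
+ # Editorial note (illustrative only): for a row like "1,2," read with
+ # names=["a", "b"] and index_col=False, the single empty trailing field is
+ # tolerated silently; any other length mismatch emits the ParserWarning above.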
+
+ @overload
+ def _evaluate_usecols(
+ self,
+ usecols: set[int] | Callable[[Hashable], object],
+ names: Sequence[Hashable],
+ ) -> set[int]:
+ ...
+
+ @overload
+ def _evaluate_usecols(
+ self, usecols: set[str], names: Sequence[Hashable]
+ ) -> set[str]:
+ ...
+
+ @final
+ def _evaluate_usecols(
+ self,
+ usecols: Callable[[Hashable], object] | set[str] | set[int],
+ names: Sequence[Hashable],
+ ) -> set[str] | set[int]:
+ """
+ Check whether or not the 'usecols' parameter
+ is a callable. If so, enumerates the 'names'
+ parameter and returns a set of indices for
+ each entry in 'names' that evaluates to True.
+ If not a callable, returns 'usecols'.
+ """
+ if callable(usecols):
+ return {i for i, name in enumerate(names) if usecols(name)}
+ return usecols
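+
+ # Illustrative example (editor's note): a callable such as
+ # usecols=lambda name: name.startswith("a") applied to
+ # names=["ab", "b", "ac"] yields the index set {0, 2}; non-callable
+ # usecols are returned unchanged.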
+
+ @final
+ def _validate_usecols_names(self, usecols, names: Sequence):
+ """
+ Validates that all usecols are present in a given
+ list of names. If not, raise a ValueError that
+ shows what usecols are missing.
+
+ Parameters
+ ----------
+ usecols : iterable of usecols
+ The columns to validate are present in names.
+ names : iterable of names
+ The column names to check against.
+
+ Returns
+ -------
+ usecols : iterable of usecols
+ The `usecols` parameter if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : Columns were missing. Error message will list them.
+ """
+ missing = [c for c in usecols if c not in names]
+ if len(missing) > 0:
+ raise ValueError(
+ f"Usecols do not match columns, columns expected but not found: "
+ f"{missing}"
+ )
+
+ return usecols
+
+ @final
+ def _validate_usecols_arg(self, usecols):
+ """
+ Validate the 'usecols' parameter.
+
+ Checks whether or not the 'usecols' parameter contains all integers
+ (column selection by index), strings (column by name) or is a callable.
+ Raises a ValueError if that is not the case.
+
+ Parameters
+ ----------
+ usecols : list-like, callable, or None
+ List of columns to use when parsing or a callable that can be used
+ to filter a list of table columns.
+
+ Returns
+ -------
+ usecols_tuple : tuple
+ A tuple of (verified_usecols, usecols_dtype).
+
+ 'verified_usecols' is either a set if an array-like is passed in or
+ 'usecols' if a callable or None is passed in.
+
+ 'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
+ is passed in or None if a callable or None is passed in.
+ """
+ msg = (
+ "'usecols' must either be list-like of all strings, all unicode, "
+ "all integers or a callable."
+ )
+ if usecols is not None:
+ if callable(usecols):
+ return usecols, None
+
+ if not is_list_like(usecols):
+ # see gh-20529
+ #
+ # Ensure it is iterable container but not string.
+ raise ValueError(msg)
+
+ usecols_dtype = lib.infer_dtype(usecols, skipna=False)
+
+ if usecols_dtype not in ("empty", "integer", "string"):
+ raise ValueError(msg)
+
+ usecols = set(usecols)
+
+ return usecols, usecols_dtype
+ return usecols, None
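+
+ # Editorial sketch (not part of the upstream source): ["b", "a"] validates
+ # to ({"a", "b"}, "string"), [0, 2] to ({0, 2}, "integer"), a callable to
+ # (callable, None), and a mixed list such as ["a", 1] raises the ValueError
+ # above because its inferred dtype is neither all-string nor all-integer.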
+
+ @final
+ def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]:
+ if not is_index_col(index_col):
+ return None, columns, index_col
+
+ columns = list(columns)
+
+ # In case of no rows and multiindex columns we have to set index_names to
+ # list of Nones GH#38292
+ if not columns:
+ return [None] * len(index_col), columns, index_col
+
+ cp_cols = list(columns)
+ index_names: list[str | int | None] = []
+
+ # don't mutate
+ index_col = list(index_col)
+
+ for i, c in enumerate(index_col):
+ if isinstance(c, str):
+ index_names.append(c)
+ for j, name in enumerate(cp_cols):
+ if name == c:
+ index_col[i] = j
+ columns.remove(name)
+ break
+ else:
+ name = cp_cols[c]
+ columns.remove(name)
+ index_names.append(name)
+
+ # Only clean index names that were placeholders.
+ for i, name in enumerate(index_names):
+ if isinstance(name, str) and name in self.unnamed_cols:
+ index_names[i] = None
+
+ return index_names, columns, index_col
+
+ @final
+ def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):
+ columns = list(columns)
+
+ index_col = self.index_col
+ index_names = self.index_names
+
+ # Convert `dtype` to a defaultdict of some kind.
+ # This will enable us to write `dtype[col_name]`
+ # without worrying about KeyError issues later on.
+ dtype_dict: defaultdict[Hashable, Any]
+ if not is_dict_like(dtype):
+ # if dtype is None, the default will be object.
+ default_dtype = dtype or object
+ dtype_dict = defaultdict(lambda: default_dtype)
+ else:
+ dtype = cast(dict, dtype)
+ dtype_dict = defaultdict(
+ lambda: object,
+ {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
+ )
+
+ # Even though we have no data, the "index" of the empty DataFrame
+ # could for example still be an empty MultiIndex. Thus, we need to
+ # check whether we have any index columns specified, via either:
+ #
+ # 1) index_col (column indices)
+ # 2) index_names (column names)
+ #
+ # Both must be non-null to ensure a successful construction. Otherwise,
+ # we have to create a generic empty Index.
+ index: Index
+ if (index_col is None or index_col is False) or index_names is None:
+ index = default_index(0)
+ else:
+ data = [Series([], dtype=dtype_dict[name]) for name in index_names]
+ index = ensure_index_from_sequences(data, names=index_names)
+ index_col.sort()
+
+ for i, n in enumerate(index_col):
+ columns.pop(n - i)
+
+ col_dict = {
+ col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
+ }
+
+ return index, columns, col_dict
+
+
+def _make_date_converter(
+ date_parser=lib.no_default,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ date_format: dict[Hashable, str] | str | None = None,
+):
+ if date_parser is not lib.no_default:
+ warnings.warn(
+ "The argument 'date_parser' is deprecated and will "
+ "be removed in a future version. "
+ "Please use 'date_format' instead, or read your data in as 'object' dtype "
+ "and then call 'to_datetime'.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ if date_parser is not lib.no_default and date_format is not None:
+ raise TypeError("Cannot use both 'date_parser' and 'date_format'")
+
+ def unpack_if_single_element(arg):
+ # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
+ if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1:
+ return arg[0]
+ return arg
+
+ def converter(*date_cols, col: Hashable):
+ if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm":
+ return date_cols[0]
+
+ if date_parser is lib.no_default:
+ strs = parsing.concat_date_cols(date_cols)
+ date_fmt = (
+ date_format.get(col) if isinstance(date_format, dict) else date_format
+ )
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones will raise an error",
+ category=FutureWarning,
+ )
+ str_objs = ensure_object(strs)
+ try:
+ result = tools.to_datetime(
+ str_objs,
+ format=date_fmt,
+ utc=False,
+ dayfirst=dayfirst,
+ cache=cache_dates,
+ )
+ except (ValueError, TypeError):
+ # test_usecols_with_parse_dates4
+ return str_objs
+
+ if isinstance(result, DatetimeIndex):
+ arr = result.to_numpy()
+ arr.flags.writeable = True
+ return arr
+ return result._values
+ else:
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones "
+ "will raise an error",
+ category=FutureWarning,
+ )
+ pre_parsed = date_parser(
+ *(unpack_if_single_element(arg) for arg in date_cols)
+ )
+ try:
+ result = tools.to_datetime(
+ pre_parsed,
+ cache=cache_dates,
+ )
+ except (ValueError, TypeError):
+ # test_read_csv_with_custom_date_parser
+ result = pre_parsed
+ if isinstance(result, datetime.datetime):
+ raise Exception("scalar parser")
+ return result
+ except Exception:
+ # e.g. test_datetime_fractional_seconds
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones "
+ "will raise an error",
+ category=FutureWarning,
+ )
+ pre_parsed = parsing.try_parse_dates(
+ parsing.concat_date_cols(date_cols),
+ parser=date_parser,
+ )
+ try:
+ return tools.to_datetime(pre_parsed)
+ except (ValueError, TypeError):
+ # TODO: not reached in tests 2023-10-27; needed?
+ return pre_parsed
+
+ return converter
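+
+ # Editorial note (illustrative only): when no legacy date_parser is given,
+ # the returned converter concatenates the requested columns into strings
+ # and calls to_datetime on them; if date_format is a dict, e.g.
+ # {"start": "%Y-%m-%d"}, the format is looked up per target column, and
+ # unparseable input falls back to the raw string values.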
+
+
+parser_defaults = {
+ "delimiter": None,
+ "escapechar": None,
+ "quotechar": '"',
+ "quoting": csv.QUOTE_MINIMAL,
+ "doublequote": True,
+ "skipinitialspace": False,
+ "lineterminator": None,
+ "header": "infer",
+ "index_col": None,
+ "names": None,
+ "skiprows": None,
+ "skipfooter": 0,
+ "nrows": None,
+ "na_values": None,
+ "keep_default_na": True,
+ "true_values": None,
+ "false_values": None,
+ "converters": None,
+ "dtype": None,
+ "cache_dates": True,
+ "thousands": None,
+ "comment": None,
+ "decimal": ".",
+ # 'engine': 'c',
+ "parse_dates": False,
+ "keep_date_col": False,
+ "dayfirst": False,
+ "date_parser": lib.no_default,
+ "date_format": None,
+ "usecols": None,
+ # 'iterator': False,
+ "chunksize": None,
+ "verbose": False,
+ "encoding": None,
+ "compression": None,
+ "skip_blank_lines": True,
+ "encoding_errors": "strict",
+ "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR,
+ "dtype_backend": lib.no_default,
+}
+
+
+def _process_date_conversion(
+ data_dict,
+ converter: Callable,
+ parse_spec,
+ index_col,
+ index_names,
+ columns,
+ keep_date_col: bool = False,
+ dtype_backend=lib.no_default,
+):
+ def _isindex(colspec):
+ return (isinstance(index_col, list) and colspec in index_col) or (
+ isinstance(index_names, list) and colspec in index_names
+ )
+
+ new_cols = []
+ new_data = {}
+
+ orig_names = columns
+ columns = list(columns)
+
+ date_cols = set()
+
+ if parse_spec is None or isinstance(parse_spec, bool):
+ return data_dict, columns
+
+ if isinstance(parse_spec, list):
+ # list of column lists
+ for colspec in parse_spec:
+ if is_scalar(colspec) or isinstance(colspec, tuple):
+ if isinstance(colspec, int) and colspec not in data_dict:
+ colspec = orig_names[colspec]
+ if _isindex(colspec):
+ continue
+ elif dtype_backend == "pyarrow":
+ import pyarrow as pa
+
+ dtype = data_dict[colspec].dtype
+ if isinstance(dtype, ArrowDtype) and (
+ pa.types.is_timestamp(dtype.pyarrow_dtype)
+ or pa.types.is_date(dtype.pyarrow_dtype)
+ ):
+ continue
+
+ # Pyarrow engine returns Series which we need to convert to
+ # numpy array before converter; it's a no-op for other parsers
+ data_dict[colspec] = converter(
+ np.asarray(data_dict[colspec]), col=colspec
+ )
+ else:
+ new_name, col, old_names = _try_convert_dates(
+ converter, colspec, data_dict, orig_names
+ )
+ if new_name in data_dict:
+ raise ValueError(f"New date column already in dict {new_name}")
+ new_data[new_name] = col
+ new_cols.append(new_name)
+ date_cols.update(old_names)
+
+ elif isinstance(parse_spec, dict):
+ # dict of new name to column list
+ for new_name, colspec in parse_spec.items():
+ if new_name in data_dict:
+ raise ValueError(f"Date column {new_name} already in dict")
+
+ _, col, old_names = _try_convert_dates(
+ converter,
+ colspec,
+ data_dict,
+ orig_names,
+ target_name=new_name,
+ )
+
+ new_data[new_name] = col
+
+ # If original column can be converted to date we keep the converted values
+ # This can only happen if values are from single column
+ if len(colspec) == 1:
+ new_data[colspec[0]] = col
+
+ new_cols.append(new_name)
+ date_cols.update(old_names)
+
+ if isinstance(data_dict, DataFrame):
+ data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False)
+ else:
+ data_dict.update(new_data)
+ new_cols.extend(columns)
+
+ if not keep_date_col:
+ for c in list(date_cols):
+ data_dict.pop(c)
+ new_cols.remove(c)
+
+ return data_dict, new_cols
+
+
+def _try_convert_dates(
+ parser: Callable, colspec, data_dict, columns, target_name: str | None = None
+):
+ colset = set(columns)
+ colnames = []
+
+ for c in colspec:
+ if c in colset:
+ colnames.append(c)
+ elif isinstance(c, int) and c not in columns:
+ colnames.append(columns[c])
+ else:
+ colnames.append(c)
+
+ new_name: tuple | str
+ if all(isinstance(x, tuple) for x in colnames):
+ new_name = tuple(map("_".join, zip(*colnames)))
+ else:
+ new_name = "_".join([str(x) for x in colnames])
+ to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
+
+ new_col = parser(*to_parse, col=new_name if target_name is None else target_name)
+ return new_name, new_col, colnames
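+
+ # Illustrative example (editor's note): with columns ["y", "m", "d"] and
+ # colspec=[1, 2], the resolved colnames are ["m", "d"], the combined column
+ # is named "m_d", and the two arrays are concatenated before parsing; when
+ # every name is a tuple (MultiIndex columns), the new name is built
+ # level-wise with the same "_" join.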
+
+
+def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool):
+ """
+ Get the NaN values for a given column.
+
+ Parameters
+ ----------
+ col : str
+ The name of the column.
+ na_values : array-like, dict
+ The object listing the NaN values as strings.
+ na_fvalues : array-like, dict
+ The object listing the NaN values as floats.
+ keep_default_na : bool
+ If `na_values` is a dict, and the column is not mapped in the
+ dictionary, whether to return the default NaN values or the empty set.
+
+ Returns
+ -------
+ nan_tuple : A length-two tuple composed of
+
+ 1) na_values : the string NaN values for that column.
+ 2) na_fvalues : the float NaN values for that column.
+ """
+ if isinstance(na_values, dict):
+ if col in na_values:
+ return na_values[col], na_fvalues[col]
+ else:
+ if keep_default_na:
+ return STR_NA_VALUES, set()
+
+ return set(), set()
+ else:
+ return na_values, na_fvalues
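+
+ # Editorial sketch (not upstream code): with na_values={"a": {"missing"}}
+ # and keep_default_na=True, column "a" gets its own sentinels while any
+ # other column falls back to (STR_NA_VALUES, set()); with
+ # keep_default_na=False the fallback is (set(), set()). Non-dict na_values
+ # are returned unchanged for every column.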
+
+
+def _validate_parse_dates_arg(parse_dates):
+ """
+ Check whether or not the 'parse_dates' parameter
+ is a non-boolean scalar. Raises a TypeError if
+ that is the case.
+ """
+ msg = (
+ "Only booleans, lists, and dictionaries are accepted "
+ "for the 'parse_dates' parameter"
+ )
+
+ if not (
+ parse_dates is None
+ or lib.is_bool(parse_dates)
+ or isinstance(parse_dates, (list, dict))
+ ):
+ raise TypeError(msg)
+
+ return parse_dates
+
+
+def is_index_col(col) -> bool:
+ return col is not None and col is not False
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cd788c5e57399597e3fe4ee1b1bf2af4bffd74b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py
@@ -0,0 +1,410 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from typing import TYPE_CHECKING
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+ lib,
+ parsers,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import DtypeWarning
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.concat import (
+ concat_compat,
+ union_categoricals,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+from pandas.core.indexes.api import ensure_index_from_sequences
+
+from pandas.io.common import (
+ dedup_names,
+ is_potential_multi_index,
+)
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ ParserError,
+ is_index_col,
+)
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Mapping,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ ArrayLike,
+ DtypeArg,
+ DtypeObj,
+ ReadCsvBuffer,
+ )
+
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
+
+class CParserWrapper(ParserBase):
+ low_memory: bool
+ _reader: parsers.TextReader
+
+ def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
+ super().__init__(kwds)
+ self.kwds = kwds
+ kwds = kwds.copy()
+
+ self.low_memory = kwds.pop("low_memory", False)
+
+ # #2442
+ # error: Cannot determine type of 'index_col'
+ kwds["allow_leading_cols"] = (
+ self.index_col is not False # type: ignore[has-type]
+ )
+
+ # GH20529, validate usecol arg before TextReader
+ kwds["usecols"] = self.usecols
+
+ # Have to pass int, would break tests using TextReader directly otherwise :(
+ kwds["on_bad_lines"] = self.on_bad_lines.value
+
+ for key in (
+ "storage_options",
+ "encoding",
+ "memory_map",
+ "compression",
+ ):
+ kwds.pop(key, None)
+
+ kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
+ if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:
+ kwds["dtype_backend"] = "numpy"
+ if kwds["dtype_backend"] == "pyarrow":
+ # Fail here loudly instead of in cython after reading
+ import_optional_dependency("pyarrow")
+ self._reader = parsers.TextReader(src, **kwds)
+
+ self.unnamed_cols = self._reader.unnamed_cols
+
+ # error: Cannot determine type of 'names'
+ passed_names = self.names is None # type: ignore[has-type]
+
+ if self._reader.header is None:
+ self.names = None
+ else:
+ # error: Cannot determine type of 'names'
+ # error: Cannot determine type of 'index_names'
+ (
+ self.names, # type: ignore[has-type]
+ self.index_names,
+ self.col_names,
+ passed_names,
+ ) = self._extract_multi_indexer_columns(
+ self._reader.header,
+ self.index_names, # type: ignore[has-type]
+ passed_names,
+ )
+
+ # error: Cannot determine type of 'names'
+ if self.names is None: # type: ignore[has-type]
+ self.names = list(range(self._reader.table_width))
+
+ # gh-9755
+ #
+ # need to set orig_names here first
+ # so that proper indexing can be done
+ # with _set_noconvert_columns
+ #
+ # once names has been filtered, we will
+ # then set orig_names again to names
+ # error: Cannot determine type of 'names'
+ self.orig_names = self.names[:] # type: ignore[has-type]
+
+ if self.usecols:
+ usecols = self._evaluate_usecols(self.usecols, self.orig_names)
+
+ # GH 14671
+ # assert for mypy, orig_names is List or None, None would error in issubset
+ assert self.orig_names is not None
+ if self.usecols_dtype == "string" and not set(usecols).issubset(
+ self.orig_names
+ ):
+ self._validate_usecols_names(usecols, self.orig_names)
+
+ # error: Cannot determine type of 'names'
+ if len(self.names) > len(usecols): # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ self.names = [ # type: ignore[has-type]
+ n
+ # error: Cannot determine type of 'names'
+ for i, n in enumerate(self.names) # type: ignore[has-type]
+ if (i in usecols or n in usecols)
+ ]
+
+ # error: Cannot determine type of 'names'
+ if len(self.names) < len(usecols): # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ self._validate_usecols_names(
+ usecols,
+ self.names, # type: ignore[has-type]
+ )
+
+ # error: Cannot determine type of 'names'
+ self._validate_parse_dates_presence(self.names) # type: ignore[has-type]
+ self._set_noconvert_columns()
+
+ # error: Cannot determine type of 'names'
+ self.orig_names = self.names # type: ignore[has-type]
+
+ if not self._has_complex_date_col:
+ # error: Cannot determine type of 'index_col'
+ if self._reader.leading_cols == 0 and is_index_col(
+ self.index_col # type: ignore[has-type]
+ ):
+ self._name_processed = True
+ (
+ index_names,
+ # error: Cannot determine type of 'names'
+ self.names, # type: ignore[has-type]
+ self.index_col,
+ ) = self._clean_index_names(
+ # error: Cannot determine type of 'names'
+ self.names, # type: ignore[has-type]
+ # error: Cannot determine type of 'index_col'
+ self.index_col, # type: ignore[has-type]
+ )
+
+ if self.index_names is None:
+ self.index_names = index_names
+
+ if self._reader.header is None and not passed_names:
+ assert self.index_names is not None
+ self.index_names = [None] * len(self.index_names)
+
+ self._implicit_index = self._reader.leading_cols > 0
+
+ def close(self) -> None:
+ # close handles opened by C parser
+ try:
+ self._reader.close()
+ except ValueError:
+ pass
+
+ def _set_noconvert_columns(self) -> None:
+ """
+ Set the columns that should not undergo dtype conversions.
+
+ Currently, any column that is involved with date parsing will not
+ undergo such conversions.
+ """
+ assert self.orig_names is not None
+ # error: Cannot determine type of 'names'
+
+ # much faster than using orig_names.index(x) xref GH#44106
+ names_dict = {x: i for i, x in enumerate(self.orig_names)}
+ col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ noconvert_columns = self._set_noconvert_dtype_columns(
+ col_indices,
+ self.names, # type: ignore[has-type]
+ )
+ for col in noconvert_columns:
+ self._reader.set_noconvert(col)
+
+ def read(
+ self,
+ nrows: int | None = None,
+ ) -> tuple[
+ Index | MultiIndex | None,
+ Sequence[Hashable] | MultiIndex,
+ Mapping[Hashable, ArrayLike],
+ ]:
+ index: Index | MultiIndex | None
+ column_names: Sequence[Hashable] | MultiIndex
+ try:
+ if self.low_memory:
+ chunks = self._reader.read_low_memory(nrows)
+ # destructive to chunks
+ data = _concatenate_chunks(chunks)
+
+ else:
+ data = self._reader.read(nrows)
+ except StopIteration:
+ if self._first_chunk:
+ self._first_chunk = False
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(self.orig_names, self.index_col),
+ )
+ index, columns, col_dict = self._get_empty_meta(
+ names,
+ dtype=self.dtype,
+ )
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ if self.usecols is not None:
+ columns = self._filter_usecols(columns)
+
+ col_dict = {k: v for k, v in col_dict.items() if k in columns}
+
+ return index, columns, col_dict
+
+ else:
+ self.close()
+ raise
+
+ # Done with first read, next time raise StopIteration
+ self._first_chunk = False
+
+ # error: Cannot determine type of 'names'
+ names = self.names # type: ignore[has-type]
+
+ if self._reader.leading_cols:
+ if self._has_complex_date_col:
+ raise NotImplementedError("file structure not yet supported")
+
+ # implicit index, no index names
+ arrays = []
+
+ if self.index_col and self._reader.leading_cols != len(self.index_col):
+ raise ParserError(
+ "Could not construct index. Requested to use "
+ f"{len(self.index_col)} number of columns, but "
+ f"{self._reader.leading_cols} left to parse."
+ )
+
+ for i in range(self._reader.leading_cols):
+ if self.index_col is None:
+ values = data.pop(i)
+ else:
+ values = data.pop(self.index_col[i])
+
+ values = self._maybe_parse_dates(values, i, try_parse_dates=True)
+ arrays.append(values)
+
+ index = ensure_index_from_sequences(arrays)
+
+ if self.usecols is not None:
+ names = self._filter_usecols(names)
+
+ names = dedup_names(names, is_potential_multi_index(names, self.index_col))
+
+ # rename dict keys
+ data_tups = sorted(data.items())
+ data = {k: v for k, (i, v) in zip(names, data_tups)}
+
+ column_names, date_data = self._do_date_conversions(names, data)
+
+ # maybe create a mi on the columns
+ column_names = self._maybe_make_multi_index_columns(
+ column_names, self.col_names
+ )
+
+ else:
+ # rename dict keys
+ data_tups = sorted(data.items())
+
+ # ugh, mutation
+
+ # assert for mypy, orig_names is List or None, None would error in list(...)
+ assert self.orig_names is not None
+ names = list(self.orig_names)
+ names = dedup_names(names, is_potential_multi_index(names, self.index_col))
+
+ if self.usecols is not None:
+ names = self._filter_usecols(names)
+
+ # columns as list
+ alldata = [x[1] for x in data_tups]
+ if self.usecols is None:
+ self._check_data_length(names, alldata)
+
+ data = {k: v for k, (i, v) in zip(names, data_tups)}
+
+ names, date_data = self._do_date_conversions(names, data)
+ index, column_names = self._make_index(date_data, alldata, names)
+
+ return index, column_names, date_data
+
+ def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
+ # hackish
+ usecols = self._evaluate_usecols(self.usecols, names)
+ if usecols is not None and len(names) != len(usecols):
+ names = [
+ name for i, name in enumerate(names) if i in usecols or name in usecols
+ ]
+ return names
+
+ def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
+ if try_parse_dates and self._should_parse_dates(index):
+ values = self._date_conv(
+ values,
+ col=self.index_names[index] if self.index_names is not None else None,
+ )
+ return values
+
+
+def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
+ """
+ Concatenate chunks of data read with low_memory=True.
+
+ The tricky part is handling Categoricals, where different chunks
+ may have different inferred categories.
+ """
+ names = list(chunks[0].keys())
+ warning_columns = []
+
+ result: dict = {}
+ for name in names:
+ arrs = [chunk.pop(name) for chunk in chunks]
+ # Check each arr for consistent types.
+ dtypes = {a.dtype for a in arrs}
+ non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)}
+
+ dtype = dtypes.pop()
+ if isinstance(dtype, CategoricalDtype):
+ result[name] = union_categoricals(arrs, sort_categories=False)
+ else:
+ result[name] = concat_compat(arrs)
+ if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):
+ warning_columns.append(str(name))
+
+ if warning_columns:
+ warning_names = ",".join(warning_columns)
+ warning_message = " ".join(
+ [
+ f"Columns ({warning_names}) have mixed types. "
+ f"Specify dtype option on import or set low_memory=False."
+ ]
+ )
+ warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
+ return result
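+
+ # Editorial note (illustrative only): if a column is read as int64 in one
+ # low_memory chunk and as object in another, the concatenated result is
+ # object dtype and the column name is reported in the DtypeWarning above;
+ # categorical chunks are instead merged with union_categoricals.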
+
+
+def ensure_dtype_objs(
+ dtype: DtypeArg | dict[Hashable, DtypeArg] | None
+) -> DtypeObj | dict[Hashable, DtypeObj] | None:
+ """
+ Ensure we have either None, a dtype object, or a dictionary mapping to
+ dtype objects.
+ """
+ if isinstance(dtype, defaultdict):
+ # "None" not callable [misc]
+ default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc]
+ dtype_converted: defaultdict = defaultdict(lambda: default_dtype)
+ for key in dtype.keys():
+ dtype_converted[key] = pandas_dtype(dtype[key])
+ return dtype_converted
+ elif isinstance(dtype, dict):
+ return {k: pandas_dtype(dtype[k]) for k in dtype}
+ elif dtype is not None:
+ return pandas_dtype(dtype)
+ return dtype
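+
+ # Illustrative example (editor's note): {"a": "int64", "b": "category"} is
+ # normalized to {"a": np.dtype("int64"), "b": CategoricalDtype()}, a bare
+ # "float" becomes np.dtype("float64"), and None is passed through unchanged.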
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..79e7554a5744cf439a65e9fd1e18782a0fa71548
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py
@@ -0,0 +1,1387 @@
+from __future__ import annotations
+
+from collections import (
+ abc,
+ defaultdict,
+)
+from collections.abc import (
+ Hashable,
+ Iterator,
+ Mapping,
+ Sequence,
+)
+import csv
+from io import StringIO
+import re
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ DefaultDict,
+ Literal,
+ cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+from pandas.errors import (
+ EmptyDataError,
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_integer,
+ is_numeric_dtype,
+)
+from pandas.core.dtypes.inference import is_dict_like
+
+from pandas.io.common import (
+ dedup_names,
+ is_potential_multi_index,
+)
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ parser_defaults,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ArrayLike,
+ ReadCsvBuffer,
+ Scalar,
+ )
+
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
+# BOM character (byte order mark)
+# This exists at the beginning of a file to indicate endianness
+# of a file (stream). Unfortunately, this marker screws up parsing,
+# so we need to remove it if we see it.
+_BOM = "\ufeff"
+
+
+class PythonParser(ParserBase):
+ _no_thousands_columns: set[int]
+
+ def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:
+ """
+ Workhorse function for processing nested list into DataFrame
+ """
+ super().__init__(kwds)
+
+ self.data: Iterator[str] | None = None
+ self.buf: list = []
+ self.pos = 0
+ self.line_pos = 0
+
+ self.skiprows = kwds["skiprows"]
+
+ if callable(self.skiprows):
+ self.skipfunc = self.skiprows
+ else:
+ self.skipfunc = lambda x: x in self.skiprows
+
+ self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
+ self.delimiter = kwds["delimiter"]
+
+ self.quotechar = kwds["quotechar"]
+ if isinstance(self.quotechar, str):
+ self.quotechar = str(self.quotechar)
+
+ self.escapechar = kwds["escapechar"]
+ self.doublequote = kwds["doublequote"]
+ self.skipinitialspace = kwds["skipinitialspace"]
+ self.lineterminator = kwds["lineterminator"]
+ self.quoting = kwds["quoting"]
+ self.skip_blank_lines = kwds["skip_blank_lines"]
+
+ self.has_index_names = False
+ if "has_index_names" in kwds:
+ self.has_index_names = kwds["has_index_names"]
+
+ self.verbose = kwds["verbose"]
+
+ self.thousands = kwds["thousands"]
+ self.decimal = kwds["decimal"]
+
+ self.comment = kwds["comment"]
+
+ # Set self.data to something that can read lines.
+ if isinstance(f, list):
+ # read_excel: f is a list
+ self.data = cast(Iterator[str], f)
+ else:
+ assert hasattr(f, "readline")
+ self.data = self._make_reader(f)
+
+ # Get columns in two steps: infer from data, then
+ # infer column indices from self.usecols if it is specified.
+ self._col_indices: list[int] | None = None
+ columns: list[list[Scalar | None]]
+ (
+ columns,
+ self.num_original_columns,
+ self.unnamed_cols,
+ ) = self._infer_columns()
+
+ # Now self.columns has the set of columns that we will process.
+ # The original set is stored in self.orig_names.
+ # error: Cannot determine type of 'index_names'
+ (
+ self.columns,
+ self.index_names,
+ self.col_names,
+ _,
+ ) = self._extract_multi_indexer_columns(
+ columns,
+ self.index_names, # type: ignore[has-type]
+ )
+
+ # get popped off for index
+ self.orig_names: list[Hashable] = list(self.columns)
+
+ # needs to be cleaned/refactored
+ # multiple date column thing turning into a real spaghetti factory
+
+ if not self._has_complex_date_col:
+ (index_names, self.orig_names, self.columns) = self._get_index_name()
+ self._name_processed = True
+ if self.index_names is None:
+ self.index_names = index_names
+
+ if self._col_indices is None:
+ self._col_indices = list(range(len(self.columns)))
+
+ self._parse_date_cols = self._validate_parse_dates_presence(self.columns)
+ self._no_thousands_columns = self._set_no_thousand_columns()
+
+ if len(self.decimal) != 1:
+ raise ValueError("Only length-1 decimal markers supported")
+
+ @cache_readonly
+ def num(self) -> re.Pattern:
+ decimal = re.escape(self.decimal)
+ if self.thousands is None:
+ regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
+ else:
+ thousands = re.escape(self.thousands)
+ regex = (
+ rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
+ rf"([0-9]?(E|e)\-?[0-9]+)?$"
+ )
+ return re.compile(regex)
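+
+ # Editorial note (illustrative only): with decimal "." and thousands ",",
+ # this pattern matches numeric-looking strings such as "1,234.5" and
+ # "-2e-3", while values like "1.2.3" or "12-3" do not match.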
+
+ def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]):
+ sep = self.delimiter
+
+ if sep is None or len(sep) == 1:
+ if self.lineterminator:
+ raise ValueError(
+ "Custom line terminators not supported in python parser (yet)"
+ )
+
+ class MyDialect(csv.Dialect):
+ delimiter = self.delimiter
+ quotechar = self.quotechar
+ escapechar = self.escapechar
+ doublequote = self.doublequote
+ skipinitialspace = self.skipinitialspace
+ quoting = self.quoting
+ lineterminator = "\n"
+
+ dia = MyDialect
+
+ if sep is not None:
+ dia.delimiter = sep
+ else:
+ # attempt to sniff the delimiter from the first valid line,
+ # i.e. no comment line and not in skiprows
+ line = f.readline()
+ lines = self._check_comments([[line]])[0]
+ while self.skipfunc(self.pos) or not lines:
+ self.pos += 1
+ line = f.readline()
+ lines = self._check_comments([[line]])[0]
+ lines_str = cast(list[str], lines)
+
+ # since `line` was a string, lines will be a list containing
+ # only a single string
+ line = lines_str[0]
+
+ self.pos += 1
+ self.line_pos += 1
+ sniffed = csv.Sniffer().sniff(line)
+ dia.delimiter = sniffed.delimiter
+
+ # Note: encoding is irrelevant here
+ line_rdr = csv.reader(StringIO(line), dialect=dia)
+ self.buf.extend(list(line_rdr))
+
+ # Note: encoding is irrelevant here
+ reader = csv.reader(f, dialect=dia, strict=True)
+
+ else:
+
+ def _read():
+ line = f.readline()
+ pat = re.compile(sep)
+
+ yield pat.split(line.strip())
+
+ for line in f:
+ yield pat.split(line.strip())
+
+ reader = _read()
+
+ return reader
+
+ def read(
+ self, rows: int | None = None
+ ) -> tuple[
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
+ ]:
+ try:
+ content = self._get_lines(rows)
+ except StopIteration:
+ if self._first_chunk:
+ content = []
+ else:
+ self.close()
+ raise
+
+ # done with first read, next time raise StopIteration
+ self._first_chunk = False
+
+ columns: Sequence[Hashable] = list(self.orig_names)
+ if not len(content): # pragma: no cover
+ # DataFrame with the right metadata, even though it's length 0
+ # error: Cannot determine type of 'index_col'
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(
+ self.orig_names,
+ self.index_col, # type: ignore[has-type]
+ ),
+ )
+ index, columns, col_dict = self._get_empty_meta(
+ names,
+ self.dtype,
+ )
+ conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+ return index, conv_columns, col_dict
+
+ # handle new style for names in index
+ count_empty_content_vals = count_empty_vals(content[0])
+ indexnamerow = None
+ if self.has_index_names and count_empty_content_vals == len(columns):
+ indexnamerow = content[0]
+ content = content[1:]
+
+ alldata = self._rows_to_cols(content)
+ data, columns = self._exclude_implicit_index(alldata)
+
+ conv_data = self._convert_data(data)
+ columns, conv_data = self._do_date_conversions(columns, conv_data)
+
+ index, result_columns = self._make_index(
+ conv_data, alldata, columns, indexnamerow
+ )
+
+ return index, result_columns, conv_data
+
+ def _exclude_implicit_index(
+ self,
+ alldata: list[np.ndarray],
+ ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:
+ # error: Cannot determine type of 'index_col'
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(
+ self.orig_names,
+ self.index_col, # type: ignore[has-type]
+ ),
+ )
+
+ offset = 0
+ if self._implicit_index:
+ # error: Cannot determine type of 'index_col'
+ offset = len(self.index_col) # type: ignore[has-type]
+
+ len_alldata = len(alldata)
+ self._check_data_length(names, alldata)
+
+ return {
+ name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata
+ }, names
+
+ # legacy
+ def get_chunk(
+ self, size: int | None = None
+ ) -> tuple[
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
+ ]:
+ if size is None:
+ # error: "PythonParser" has no attribute "chunksize"
+ size = self.chunksize # type: ignore[attr-defined]
+ return self.read(rows=size)
+
+ def _convert_data(
+ self,
+ data: Mapping[Hashable, np.ndarray],
+ ) -> Mapping[Hashable, ArrayLike]:
+ # apply converters
+ clean_conv = self._clean_mapping(self.converters)
+ clean_dtypes = self._clean_mapping(self.dtype)
+
+ # Apply NA values.
+ clean_na_values = {}
+ clean_na_fvalues = {}
+
+ if isinstance(self.na_values, dict):
+ for col in self.na_values:
+ na_value = self.na_values[col]
+ na_fvalue = self.na_fvalues[col]
+
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+
+ clean_na_values[col] = na_value
+ clean_na_fvalues[col] = na_fvalue
+ else:
+ clean_na_values = self.na_values
+ clean_na_fvalues = self.na_fvalues
+
+ return self._convert_to_ndarrays(
+ data,
+ clean_na_values,
+ clean_na_fvalues,
+ self.verbose,
+ clean_conv,
+ clean_dtypes,
+ )
+
+ @cache_readonly
+ def _have_mi_columns(self) -> bool:
+ if self.header is None:
+ return False
+
+ header = self.header
+ if isinstance(header, (list, tuple, np.ndarray)):
+ return len(header) > 1
+ else:
+ return False
+
+ def _infer_columns(
+ self,
+ ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:
+ names = self.names
+ num_original_columns = 0
+ clear_buffer = True
+ unnamed_cols: set[Scalar | None] = set()
+
+ if self.header is not None:
+ header = self.header
+ have_mi_columns = self._have_mi_columns
+
+ if isinstance(header, (list, tuple, np.ndarray)):
+ # we have a mi columns, so read an extra line
+ if have_mi_columns:
+ header = list(header) + [header[-1] + 1]
+ else:
+ header = [header]
+
+ columns: list[list[Scalar | None]] = []
+ for level, hr in enumerate(header):
+ try:
+ line = self._buffered_line()
+
+ while self.line_pos <= hr:
+ line = self._next_line()
+
+ except StopIteration as err:
+ if 0 < self.line_pos <= hr and (
+ not have_mi_columns or hr != header[-1]
+ ):
+ # If no rows we want to raise a different message and if
+ # we have mi columns, the last line is not part of the header
+ joi = list(map(str, header[:-1] if have_mi_columns else header))
+ msg = f"[{','.join(joi)}], len of {len(joi)}, "
+ raise ValueError(
+ f"Passed header={msg}"
+ f"but only {self.line_pos} lines in file"
+ ) from err
+
+ # We have an empty file, so check
+ # if columns are provided. That will
+ # serve as the 'line' for parsing
+ if have_mi_columns and hr > 0:
+ if clear_buffer:
+ self._clear_buffer()
+ columns.append([None] * len(columns[-1]))
+ return columns, num_original_columns, unnamed_cols
+
+ if not self.names:
+ raise EmptyDataError("No columns to parse from file") from err
+
+ line = self.names[:]
+
+ this_columns: list[Scalar | None] = []
+ this_unnamed_cols = []
+
+ for i, c in enumerate(line):
+ if c == "":
+ if have_mi_columns:
+ col_name = f"Unnamed: {i}_level_{level}"
+ else:
+ col_name = f"Unnamed: {i}"
+
+ this_unnamed_cols.append(i)
+ this_columns.append(col_name)
+ else:
+ this_columns.append(c)
+
+ if not have_mi_columns:
+ counts: DefaultDict = defaultdict(int)
+ # Ensure that regular columns are used before unnamed ones
+ # to keep given names and mangle unnamed columns
+ col_loop_order = [
+ i
+ for i in range(len(this_columns))
+ if i not in this_unnamed_cols
+ ] + this_unnamed_cols
+
+ # TODO: Use pandas.io.common.dedup_names instead (see #50371)
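+                    # Illustration (editorial note, not from pandas, hypothetical
+                    # values): for a header row like ["a", "a", "b", "a"], the loop
+                    # below renames the duplicates to ["a", "a.1", "b", "a.2"], and
+                    # a dtype keyed on the original name is propagated to the
+                    # mangled name.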
+ for i in col_loop_order:
+ col = this_columns[i]
+ old_col = col
+ cur_count = counts[col]
+
+ if cur_count > 0:
+ while cur_count > 0:
+ counts[old_col] = cur_count + 1
+ col = f"{old_col}.{cur_count}"
+ if col in this_columns:
+ cur_count += 1
+ else:
+ cur_count = counts[col]
+
+ if (
+ self.dtype is not None
+ and is_dict_like(self.dtype)
+ and self.dtype.get(old_col) is not None
+ and self.dtype.get(col) is None
+ ):
+ self.dtype.update({col: self.dtype.get(old_col)})
+ this_columns[i] = col
+ counts[col] = cur_count + 1
+ elif have_mi_columns:
+                    # if we have grabbed an extra line, but it's not in our
+                    # format, save it in the buffer and create a blank extra
+                    # line for the rest of the parsing code
+ if hr == header[-1]:
+ lc = len(this_columns)
+ # error: Cannot determine type of 'index_col'
+ sic = self.index_col # type: ignore[has-type]
+ ic = len(sic) if sic is not None else 0
+ unnamed_count = len(this_unnamed_cols)
+
+ # if wrong number of blanks or no index, not our format
+ if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:
+ clear_buffer = False
+ this_columns = [None] * lc
+ self.buf = [self.buf[-1]]
+
+ columns.append(this_columns)
+ unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
+
+ if len(columns) == 1:
+ num_original_columns = len(this_columns)
+
+ if clear_buffer:
+ self._clear_buffer()
+
+ first_line: list[Scalar] | None
+ if names is not None:
+ # Read first row after header to check if data are longer
+ try:
+ first_line = self._next_line()
+ except StopIteration:
+ first_line = None
+
+ len_first_data_row = 0 if first_line is None else len(first_line)
+
+ if len(names) > len(columns[0]) and len(names) > len_first_data_row:
+ raise ValueError(
+ "Number of passed names did not match "
+ "number of header fields in the file"
+ )
+ if len(columns) > 1:
+ raise TypeError("Cannot pass names with multi-index columns")
+
+ if self.usecols is not None:
+                    # Set self._col_indices (via _handle_usecols). We don't store
+                    # columns because they are overwritten.
+ self._handle_usecols(columns, names, num_original_columns)
+ else:
+ num_original_columns = len(names)
+ if self._col_indices is not None and len(names) != len(
+ self._col_indices
+ ):
+ columns = [[names[i] for i in sorted(self._col_indices)]]
+ else:
+ columns = [names]
+ else:
+ columns = self._handle_usecols(
+ columns, columns[0], num_original_columns
+ )
+ else:
+ ncols = len(self._header_line)
+ num_original_columns = ncols
+
+ if not names:
+ columns = [list(range(ncols))]
+ columns = self._handle_usecols(columns, columns[0], ncols)
+ elif self.usecols is None or len(names) >= ncols:
+ columns = self._handle_usecols([names], names, ncols)
+ num_original_columns = len(names)
+ elif not callable(self.usecols) and len(names) != len(self.usecols):
+ raise ValueError(
+ "Number of passed names did not match number of "
+ "header fields in the file"
+ )
+ else:
+ # Ignore output but set used columns.
+ columns = [names]
+ self._handle_usecols(columns, columns[0], ncols)
+
+ return columns, num_original_columns, unnamed_cols
+
+ @cache_readonly
+ def _header_line(self):
+ # Store line for reuse in _get_index_name
+ if self.header is not None:
+ return None
+
+ try:
+ line = self._buffered_line()
+ except StopIteration as err:
+ if not self.names:
+ raise EmptyDataError("No columns to parse from file") from err
+
+ line = self.names[:]
+ return line
+
+ def _handle_usecols(
+ self,
+ columns: list[list[Scalar | None]],
+ usecols_key: list[Scalar | None],
+ num_original_columns: int,
+ ) -> list[list[Scalar | None]]:
+ """
+ Sets self._col_indices
+
+ usecols_key is used if there are string usecols.
+ """
+ col_indices: set[int] | list[int]
+ if self.usecols is not None:
+ if callable(self.usecols):
+ col_indices = self._evaluate_usecols(self.usecols, usecols_key)
+ elif any(isinstance(u, str) for u in self.usecols):
+ if len(columns) > 1:
+ raise ValueError(
+ "If using multiple headers, usecols must be integers."
+ )
+ col_indices = []
+
+ for col in self.usecols:
+ if isinstance(col, str):
+ try:
+ col_indices.append(usecols_key.index(col))
+ except ValueError:
+ self._validate_usecols_names(self.usecols, usecols_key)
+ else:
+ col_indices.append(col)
+ else:
+ missing_usecols = [
+ col for col in self.usecols if col >= num_original_columns
+ ]
+ if missing_usecols:
+ raise ParserError(
+ "Defining usecols with out-of-bounds indices is not allowed. "
+ f"{missing_usecols} are out-of-bounds.",
+ )
+ col_indices = self.usecols
+
+ columns = [
+ [n for i, n in enumerate(column) if i in col_indices]
+ for column in columns
+ ]
+ self._col_indices = sorted(col_indices)
+ return columns
+
+ def _buffered_line(self) -> list[Scalar]:
+ """
+ Return a line from buffer, filling buffer if required.
+ """
+ if len(self.buf) > 0:
+ return self.buf[0]
+ else:
+ return self._next_line()
+
+ def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
+ """
+ Checks whether the file begins with the BOM character.
+ If it does, remove it. In addition, if there is quoting
+ in the field subsequent to the BOM, remove it as well
+ because it technically takes place at the beginning of
+ the name, not the middle of it.
+ """
+        # first_row will be a list, so we need to check
+        # that the list is not empty before proceeding.
+ if not first_row:
+ return first_row
+
+ # The first element of this row is the one that could have the
+ # BOM that we want to remove. Check that the first element is a
+ # string before proceeding.
+ if not isinstance(first_row[0], str):
+ return first_row
+
+ # Check that the string is not empty, as that would
+ # obviously not have a BOM at the start of it.
+ if not first_row[0]:
+ return first_row
+
+ # Since the string is non-empty, check that it does
+ # in fact begin with a BOM.
+ first_elt = first_row[0][0]
+ if first_elt != _BOM:
+ return first_row
+
+ first_row_bom = first_row[0]
+ new_row: str
+
+ if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
+ start = 2
+ quote = first_row_bom[1]
+ end = first_row_bom[2:].index(quote) + 2
+
+ # Extract the data between the quotation marks
+ new_row = first_row_bom[start:end]
+
+ # Extract any remaining data after the second
+ # quotation mark.
+ if len(first_row_bom) > end + 1:
+ new_row += first_row_bom[end + 1 :]
+
+ else:
+ # No quotation so just remove BOM from first element
+ new_row = first_row_bom[1:]
+
+ new_row_list: list[Scalar] = [new_row]
+ return new_row_list + first_row[1:]
+
+ def _is_line_empty(self, line: list[Scalar]) -> bool:
+ """
+ Check if a line is empty or not.
+
+ Parameters
+ ----------
+ line : str, array-like
+ The line of data to check.
+
+ Returns
+ -------
+ boolean : Whether or not the line is empty.
+ """
+ return not line or all(not x for x in line)
+
+ def _next_line(self) -> list[Scalar]:
+ if isinstance(self.data, list):
+ while self.skipfunc(self.pos):
+ if self.pos >= len(self.data):
+ break
+ self.pos += 1
+
+ while True:
+ try:
+ line = self._check_comments([self.data[self.pos]])[0]
+ self.pos += 1
+ # either uncommented or blank to begin with
+ if not self.skip_blank_lines and (
+ self._is_line_empty(self.data[self.pos - 1]) or line
+ ):
+ break
+ if self.skip_blank_lines:
+ ret = self._remove_empty_lines([line])
+ if ret:
+ line = ret[0]
+ break
+ except IndexError:
+ raise StopIteration
+ else:
+ while self.skipfunc(self.pos):
+ self.pos += 1
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ next(self.data)
+
+ while True:
+ orig_line = self._next_iter_line(row_num=self.pos + 1)
+ self.pos += 1
+
+ if orig_line is not None:
+ line = self._check_comments([orig_line])[0]
+
+ if self.skip_blank_lines:
+ ret = self._remove_empty_lines([line])
+
+ if ret:
+ line = ret[0]
+ break
+ elif self._is_line_empty(orig_line) or line:
+ break
+
+ # This was the first line of the file,
+ # which could contain the BOM at the
+ # beginning of it.
+ if self.pos == 1:
+ line = self._check_for_bom(line)
+
+ self.line_pos += 1
+ self.buf.append(line)
+ return line
+
+ def _alert_malformed(self, msg: str, row_num: int) -> None:
+ """
+ Alert a user about a malformed row, depending on value of
+ `self.on_bad_lines` enum.
+
+ If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.
+ If `self.on_bad_lines` is WARN, the alert will be printed out.
+
+ Parameters
+ ----------
+ msg: str
+ The error message to display.
+ row_num: int
+ The row number where the parsing error occurred.
+ Because this row number is displayed, we 1-index,
+ even though we 0-index internally.
+ """
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
+ raise ParserError(msg)
+ if self.on_bad_lines == self.BadLineHandleMethod.WARN:
+ warnings.warn(
+ f"Skipping line {row_num}: {msg}\n",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
+ """
+ Wrapper around iterating through `self.data` (CSV source).
+
+ When a CSV error is raised, we check for specific
+ error messages that allow us to customize the
+ error message displayed to the user.
+
+ Parameters
+ ----------
+ row_num: int
+ The row number of the line being parsed.
+ """
+ try:
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ line = next(self.data)
+ # for mypy
+ assert isinstance(line, list)
+ return line
+ except csv.Error as e:
+ if self.on_bad_lines in (
+ self.BadLineHandleMethod.ERROR,
+ self.BadLineHandleMethod.WARN,
+ ):
+ msg = str(e)
+
+ if "NULL byte" in msg or "line contains NUL" in msg:
+ msg = (
+ "NULL byte detected. This byte "
+ "cannot be processed in Python's "
+ "native csv library at the moment, "
+ "so please pass in engine='c' instead"
+ )
+
+ if self.skipfooter > 0:
+ reason = (
+ "Error could possibly be due to "
+ "parsing errors in the skipped footer rows "
+ "(the skipfooter keyword is only applied "
+ "after Python's csv library has parsed "
+ "all rows)."
+ )
+ msg += ". " + reason
+
+ self._alert_malformed(msg, row_num)
+ return None
+
+ def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.comment is None:
+ return lines
+ ret = []
+ for line in lines:
+ rl = []
+ for x in line:
+ if (
+ not isinstance(x, str)
+ or self.comment not in x
+ or x in self.na_values
+ ):
+ rl.append(x)
+ else:
+ x = x[: x.find(self.comment)]
+ if len(x) > 0:
+ rl.append(x)
+ break
+ ret.append(rl)
+ return ret
+
+ def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ """
+ Iterate through the lines and remove any that are
+ either empty or contain only one whitespace value
+
+ Parameters
+ ----------
+ lines : list of list of Scalars
+ The array of lines that we are to filter.
+
+ Returns
+ -------
+ filtered_lines : list of list of Scalars
+ The same array of lines with the "empty" ones removed.
+ """
+ # Remove empty lines and lines with only one whitespace value
+ ret = [
+ line
+ for line in lines
+ if (
+ len(line) > 1
+ or len(line) == 1
+ and (not isinstance(line[0], str) or line[0].strip())
+ )
+ ]
+ return ret
+
+ def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.thousands is None:
+ return lines
+
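+        # Illustrative note (editorial, not from pandas, hypothetical values):
+        # with thousands=",", a field like "1,234,567" in a numeric-looking
+        # column is rewritten to "1234567" before type conversion; columns listed
+        # in self._no_thousands_columns are left untouched.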
+ return self._search_replace_num_columns(
+ lines=lines, search=self.thousands, replace=""
+ )
+
+ def _search_replace_num_columns(
+ self, lines: list[list[Scalar]], search: str, replace: str
+ ) -> list[list[Scalar]]:
+ ret = []
+ for line in lines:
+ rl = []
+ for i, x in enumerate(line):
+ if (
+ not isinstance(x, str)
+ or search not in x
+ or i in self._no_thousands_columns
+ or not self.num.search(x.strip())
+ ):
+ rl.append(x)
+ else:
+ rl.append(x.replace(search, replace))
+ ret.append(rl)
+ return ret
+
+ def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.decimal == parser_defaults["decimal"]:
+ return lines
+
+ return self._search_replace_num_columns(
+ lines=lines, search=self.decimal, replace="."
+ )
+
+ def _clear_buffer(self) -> None:
+ self.buf = []
+
+ def _get_index_name(
+ self,
+ ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:
+ """
+ Try several cases to get lines:
+
+        0) There are headers on row 0 and row 1 and their
+        summed lengths equal the length of the next line.
+        Treat row 0 as columns and row 1 as indices.
+ 1) Look for implicit index: there are more columns
+ on row 1 than row 0. If this is true, assume that row
+ 1 lists index columns and row 0 lists normal columns.
+ 2) Get index from the columns if it was listed.
+ """
+ columns: Sequence[Hashable] = self.orig_names
+ orig_names = list(columns)
+ columns = list(columns)
+
+ line: list[Scalar] | None
+ if self._header_line is not None:
+ line = self._header_line
+ else:
+ try:
+ line = self._next_line()
+ except StopIteration:
+ line = None
+
+ next_line: list[Scalar] | None
+ try:
+ next_line = self._next_line()
+ except StopIteration:
+ next_line = None
+
+ # implicitly index_col=0 b/c 1 fewer column names
+ implicit_first_cols = 0
+ if line is not None:
+ # leave it 0, #2442
+ # Case 1
+ # error: Cannot determine type of 'index_col'
+ index_col = self.index_col # type: ignore[has-type]
+ if index_col is not False:
+ implicit_first_cols = len(line) - self.num_original_columns
+
+ # Case 0
+ if (
+ next_line is not None
+ and self.header is not None
+ and index_col is not False
+ ):
+ if len(next_line) == len(line) + self.num_original_columns:
+ # column and index names on diff rows
+ self.index_col = list(range(len(line)))
+ self.buf = self.buf[1:]
+
+ for c in reversed(line):
+ columns.insert(0, c)
+
+ # Update list of original names to include all indices.
+ orig_names = list(columns)
+ self.num_original_columns = len(columns)
+ return line, orig_names, columns
+
+ if implicit_first_cols > 0:
+ # Case 1
+ self._implicit_index = True
+ if self.index_col is None:
+ self.index_col = list(range(implicit_first_cols))
+
+ index_name = None
+
+ else:
+ # Case 2
+ (index_name, _, self.index_col) = self._clean_index_names(
+ columns, self.index_col
+ )
+
+ return index_name, orig_names, columns
+
+ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
+ col_len = self.num_original_columns
+
+ if self._implicit_index:
+ col_len += len(self.index_col)
+
+ max_len = max(len(row) for row in content)
+
+        # Check that there are no rows with too many
+        # elements (rows with too few elements are
+        # padded with NaN).
+ # error: Non-overlapping identity check (left operand type: "List[int]",
+ # right operand type: "Literal[False]")
+ if (
+ max_len > col_len
+ and self.index_col is not False # type: ignore[comparison-overlap]
+ and self.usecols is None
+ ):
+ footers = self.skipfooter if self.skipfooter else 0
+ bad_lines = []
+
+ iter_content = enumerate(content)
+ content_len = len(content)
+ content = []
+
+ for i, _content in iter_content:
+ actual_len = len(_content)
+
+ if actual_len > col_len:
+ if callable(self.on_bad_lines):
+ new_l = self.on_bad_lines(_content)
+ if new_l is not None:
+ content.append(new_l)
+ elif self.on_bad_lines in (
+ self.BadLineHandleMethod.ERROR,
+ self.BadLineHandleMethod.WARN,
+ ):
+ row_num = self.pos - (content_len - i + footers)
+ bad_lines.append((row_num, actual_len))
+
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
+ break
+ else:
+ content.append(_content)
+
+ for row_num, actual_len in bad_lines:
+ msg = (
+ f"Expected {col_len} fields in line {row_num + 1}, saw "
+ f"{actual_len}"
+ )
+ if (
+ self.delimiter
+ and len(self.delimiter) > 1
+ and self.quoting != csv.QUOTE_NONE
+ ):
+ # see gh-13374
+ reason = (
+ "Error could possibly be due to quotes being "
+ "ignored when a multi-char delimiter is used."
+ )
+ msg += ". " + reason
+
+ self._alert_malformed(msg, row_num + 1)
+
+ # see gh-13320
+ zipped_content = list(lib.to_object_array(content, min_width=col_len).T)
+
+ if self.usecols:
+ assert self._col_indices is not None
+ col_indices = self._col_indices
+
+ if self._implicit_index:
+ zipped_content = [
+ a
+ for i, a in enumerate(zipped_content)
+ if (
+ i < len(self.index_col)
+ or i - len(self.index_col) in col_indices
+ )
+ ]
+ else:
+ zipped_content = [
+ a for i, a in enumerate(zipped_content) if i in col_indices
+ ]
+ return zipped_content
+
+ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
+ lines = self.buf
+ new_rows = None
+
+ # already fetched some number
+ if rows is not None:
+ # we already have the lines in the buffer
+ if len(self.buf) >= rows:
+ new_rows, self.buf = self.buf[:rows], self.buf[rows:]
+
+ # need some lines
+ else:
+ rows -= len(self.buf)
+
+ if new_rows is None:
+ if isinstance(self.data, list):
+ if self.pos > len(self.data):
+ raise StopIteration
+ if rows is None:
+ new_rows = self.data[self.pos :]
+ new_pos = len(self.data)
+ else:
+ new_rows = self.data[self.pos : self.pos + rows]
+ new_pos = self.pos + rows
+
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ self.pos = new_pos
+
+ else:
+ new_rows = []
+ try:
+ if rows is not None:
+ row_index = 0
+ row_ct = 0
+ offset = self.pos if self.pos is not None else 0
+ while row_ct < rows:
+ # assert for mypy, data is Iterator[str] or None, would
+ # error in next
+ assert self.data is not None
+ new_row = next(self.data)
+ if not self.skipfunc(offset + row_index):
+ row_ct += 1
+ row_index += 1
+ new_rows.append(new_row)
+
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ else:
+ rows = 0
+
+ while True:
+ next_row = self._next_iter_line(row_num=self.pos + rows + 1)
+ rows += 1
+
+ if next_row is not None:
+ new_rows.append(next_row)
+ len_new_rows = len(new_rows)
+
+ except StopIteration:
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ if len(lines) == 0:
+ raise
+ self.pos += len_new_rows
+
+ self.buf = []
+ else:
+ lines = new_rows
+
+ if self.skipfooter:
+ lines = lines[: -self.skipfooter]
+
+ lines = self._check_comments(lines)
+ if self.skip_blank_lines:
+ lines = self._remove_empty_lines(lines)
+ lines = self._check_thousands(lines)
+ return self._check_decimal(lines)
+
+ def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.skiprows:
+ return [
+ row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)
+ ]
+ return new_rows
+
+ def _set_no_thousand_columns(self) -> set[int]:
+ no_thousands_columns: set[int] = set()
+ if self.columns and self.parse_dates:
+ assert self._col_indices is not None
+ no_thousands_columns = self._set_noconvert_dtype_columns(
+ self._col_indices, self.columns
+ )
+ if self.columns and self.dtype:
+ assert self._col_indices is not None
+ for i, col in zip(self._col_indices, self.columns):
+ if not isinstance(self.dtype, dict) and not is_numeric_dtype(
+ self.dtype
+ ):
+ no_thousands_columns.add(i)
+ if (
+ isinstance(self.dtype, dict)
+ and col in self.dtype
+ and (
+ not is_numeric_dtype(self.dtype[col])
+ or is_bool_dtype(self.dtype[col])
+ )
+ ):
+ no_thousands_columns.add(i)
+ return no_thousands_columns
+
+
+class FixedWidthReader(abc.Iterator):
+ """
+ A reader of fixed-width lines.
+ """
+
+ def __init__(
+ self,
+ f: IO[str] | ReadCsvBuffer[str],
+ colspecs: list[tuple[int, int]] | Literal["infer"],
+ delimiter: str | None,
+ comment: str | None,
+ skiprows: set[int] | None = None,
+ infer_nrows: int = 100,
+ ) -> None:
+ self.f = f
+ self.buffer: Iterator | None = None
+ self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "
+ self.comment = comment
+ if colspecs == "infer":
+ self.colspecs = self.detect_colspecs(
+ infer_nrows=infer_nrows, skiprows=skiprows
+ )
+ else:
+ self.colspecs = colspecs
+
+ if not isinstance(self.colspecs, (tuple, list)):
+ raise TypeError(
+ "column specifications must be a list or tuple, "
+ f"input was a {type(colspecs).__name__}"
+ )
+
+ for colspec in self.colspecs:
+ if not (
+ isinstance(colspec, (tuple, list))
+ and len(colspec) == 2
+ and isinstance(colspec[0], (int, np.integer, type(None)))
+ and isinstance(colspec[1], (int, np.integer, type(None)))
+ ):
+ raise TypeError(
+ "Each column specification must be "
+ "2 element tuple or list of integers"
+ )
+
+ def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:
+ """
+ Read rows from self.f, skipping as specified.
+
+ We distinguish buffer_rows (the first <= infer_nrows
+ lines) from the rows returned to detect_colspecs
+ because it's simpler to leave the other locations
+ with skiprows logic alone than to modify them to
+ deal with the fact we skipped some rows here as
+ well.
+
+ Parameters
+ ----------
+ infer_nrows : int
+ Number of rows to read from self.f, not counting
+ rows that are skipped.
+ skiprows: set, optional
+ Indices of rows to skip.
+
+ Returns
+ -------
+ detect_rows : list of str
+ A list containing the rows to read.
+
+ """
+ if skiprows is None:
+ skiprows = set()
+ buffer_rows = []
+ detect_rows = []
+ for i, row in enumerate(self.f):
+ if i not in skiprows:
+ detect_rows.append(row)
+ buffer_rows.append(row)
+ if len(detect_rows) >= infer_nrows:
+ break
+ self.buffer = iter(buffer_rows)
+ return detect_rows
+
+ def detect_colspecs(
+ self, infer_nrows: int = 100, skiprows: set[int] | None = None
+ ) -> list[tuple[int, int]]:
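+        # Illustrative sketch (editorial, not from pandas, hypothetical values):
+        # for rows like "  col1   col2", the regex marks the character positions
+        # occupied by non-delimiter runs; XOR-ing that mask with a shifted copy
+        # yields the run boundaries, giving colspecs such as [(2, 6), (9, 13)].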
+ # Regex escape the delimiters
+ delimiters = "".join([rf"\{x}" for x in self.delimiter])
+ pattern = re.compile(f"([^{delimiters}]+)")
+ rows = self.get_rows(infer_nrows, skiprows)
+ if not rows:
+ raise EmptyDataError("No rows from which to infer column width")
+ max_len = max(map(len, rows))
+ mask = np.zeros(max_len + 1, dtype=int)
+ if self.comment is not None:
+ rows = [row.partition(self.comment)[0] for row in rows]
+ for row in rows:
+ for m in pattern.finditer(row):
+ mask[m.start() : m.end()] = 1
+ shifted = np.roll(mask, 1)
+ shifted[0] = 0
+ edges = np.where((mask ^ shifted) == 1)[0]
+ edge_pairs = list(zip(edges[::2], edges[1::2]))
+ return edge_pairs
+
+ def __next__(self) -> list[str]:
+ # Argument 1 to "next" has incompatible type "Union[IO[str],
+ # ReadCsvBuffer[str]]"; expected "SupportsNext[str]"
+ if self.buffer is not None:
+ try:
+ line = next(self.buffer)
+ except StopIteration:
+ self.buffer = None
+ line = next(self.f) # type: ignore[arg-type]
+ else:
+ line = next(self.f) # type: ignore[arg-type]
+ # Note: 'colspecs' is a sequence of half-open intervals.
+ return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs]
+
+
+class FixedWidthFieldParser(PythonParser):
+ """
+    Specialization that converts fixed-width fields into DataFrames.
+ See PythonParser for details.
+ """
+
+ def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:
+ # Support iterators, convert to a list.
+ self.colspecs = kwds.pop("colspecs")
+ self.infer_nrows = kwds.pop("infer_nrows")
+ PythonParser.__init__(self, f, **kwds)
+
+ def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader:
+ return FixedWidthReader(
+ f,
+ self.colspecs,
+ self.delimiter,
+ self.comment,
+ self.skiprows,
+ self.infer_nrows,
+ )
+
+ def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ """
+ Returns the list of lines without the empty ones. With fixed-width
+ fields, empty lines become arrays of empty strings.
+
+ See PythonParser._remove_empty_lines.
+ """
+ return [
+ line
+ for line in lines
+ if any(not isinstance(e, str) or e.strip() for e in line)
+ ]
+
+
+def count_empty_vals(vals) -> int:
+ return sum(1 for v in vals if v == "" or v is None)
+
+
+def _validate_skipfooter_arg(skipfooter: int) -> int:
+ """
+ Validate the 'skipfooter' parameter.
+
+ Checks whether 'skipfooter' is a non-negative integer.
+ Raises a ValueError if that is not the case.
+
+ Parameters
+ ----------
+ skipfooter : non-negative integer
+ The number of rows to skip at the end of the file.
+
+ Returns
+ -------
+ validated_skipfooter : non-negative integer
+ The original input if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : 'skipfooter' was not a non-negative integer.
+ """
+ if not is_integer(skipfooter):
+ raise ValueError("skipfooter must be an integer")
+
+ if skipfooter < 0:
+ raise ValueError("skipfooter cannot be negative")
+
+ # Incompatible return value type (got "Union[int, integer[Any]]", expected "int")
+ return skipfooter # type: ignore[return-value]
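+
+
+# Editorial usage sketch (not part of pandas; values are hypothetical): the
+# fixed-width machinery above is normally reached through pandas.read_fwf, e.g.
+#
+#     import io
+#     import pandas as pd
+#
+#     text = "id  name \n1   foo  \n22  bar  \n"
+#     df = pd.read_fwf(io.StringIO(text), colspecs=[(0, 4), (4, 9)])
+#
+# which builds a FixedWidthFieldParser that slices each line by the given
+# half-open column specifications.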
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/readers.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/readers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e04f27b56061030d19081d87439f0461fa53cc76
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/parsers/readers.py
@@ -0,0 +1,2383 @@
+"""
+Module contains tools for processing files into DataFrames or other objects
+
+GH#48849 provides a convenient way of deprecating keyword arguments
+"""
+from __future__ import annotations
+
+from collections import (
+ abc,
+ defaultdict,
+)
+import csv
+import sys
+from textwrap import fill
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Literal,
+ NamedTuple,
+ TypedDict,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config import using_copy_on_write
+
+from pandas._libs import lib
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas.errors import (
+ AbstractMethodError,
+ ParserWarning,
+)
+from pandas.util._decorators import Appender
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ is_file_like,
+ is_float,
+ is_hashable,
+ is_integer,
+ is_list_like,
+ pandas_dtype,
+)
+
+from pandas import Series
+from pandas.core.frame import DataFrame
+from pandas.core.indexes.api import RangeIndex
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ stringify_path,
+ validate_header_arg,
+)
+from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ is_index_col,
+ parser_defaults,
+)
+from pandas.io.parsers.c_parser_wrapper import CParserWrapper
+from pandas.io.parsers.python_parser import (
+ FixedWidthFieldParser,
+ PythonParser,
+)
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ )
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ CSVEngine,
+ DtypeArg,
+ DtypeBackend,
+ FilePath,
+ IndexLabel,
+ ReadCsvBuffer,
+ Self,
+ StorageOptions,
+ UsecolsArgType,
+ )
+_doc_read_csv_and_table = (
+ r"""
+{summary}
+
+Also supports optionally iterating or breaking of the file
+into chunks.
+
+Additional help can be found in the online docs for
+`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
+
+Parameters
+----------
+filepath_or_buffer : str, path object or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
+ expected. A local file could be: file://localhost/path/to/table.csv.
+
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
+
+ By file-like object, we refer to objects with a ``read()`` method, such as
+ a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
+sep : str, default {_default_sep}
+ Character or regex pattern to treat as the delimiter. If ``sep=None``, the
+ C engine cannot automatically detect
+ the separator, but the Python parsing engine can, meaning the latter will
+ be used and automatically detect the separator from only the first valid
+ row of the file by Python's builtin sniffer tool, ``csv.Sniffer``.
+ In addition, separators longer than 1 character and different from
+ ``'\s+'`` will be interpreted as regular expressions and will also force
+ the use of the Python parsing engine. Note that regex delimiters are prone
+ to ignoring quoted data. Regex example: ``'\r\t'``.
+delimiter : str, optional
+ Alias for ``sep``.
+header : int, Sequence of int, 'infer' or None, default 'infer'
+ Row number(s) containing column labels and marking the start of the
+ data (zero-indexed). Default behavior is to infer the column names: if no ``names``
+ are passed the behavior is identical to ``header=0`` and column
+ names are inferred from the first line of the file, if column
+ names are passed explicitly to ``names`` then the behavior is identical to
+ ``header=None``. Explicitly pass ``header=0`` to be able to
+ replace existing names. The header can be a list of integers that
+ specify row locations for a :class:`~pandas.MultiIndex` on the columns
+ e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be
+ skipped (e.g. 2 in this example is skipped). Note that this
+ parameter ignores commented lines and empty lines if
+ ``skip_blank_lines=True``, so ``header=0`` denotes the first line of
+ data rather than the first line of the file.
+names : Sequence of Hashable, optional
+ Sequence of column labels to apply. If the file contains a header row,
+ then you should explicitly pass ``header=0`` to override the column names.
+ Duplicates in this list are not allowed.
+index_col : Hashable, Sequence of Hashable or False, optional
+ Column(s) to use as row label(s), denoted either by column labels or column
+ indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`
+ will be formed for the row labels.
+
+ Note: ``index_col=False`` can be used to force pandas to *not* use the first
+ column as the index, e.g., when you have a malformed file with delimiters at
+ the end of each line.
+usecols : Sequence of Hashable or Callable, optional
+ Subset of columns to select, denoted either by column labels or column indices.
+ If list-like, all elements must either
+ be positional (i.e. integer indices into the document columns) or strings
+ that correspond to column names provided either by the user in ``names`` or
+ inferred from the document header row(s). If ``names`` are given, the document
+ header row(s) are not taken into account. For example, a valid list-like
+ ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
+ Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
+ To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order
+ preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]``
+ for columns in ``['foo', 'bar']`` order or
+ ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
+ for ``['bar', 'foo']`` order.
+
+ If callable, the callable function will be evaluated against the column
+ names, returning names where the callable function evaluates to ``True``. An
+ example of a valid callable argument would be ``lambda x: x.upper() in
+ ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
+ parsing time and lower memory usage.
+dtype : dtype or dict of {{Hashable : dtype}}, optional
+ Data type(s) to apply to either the whole dataset or individual columns.
+ E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}``
+ Use ``str`` or ``object`` together with suitable ``na_values`` settings
+ to preserve and not interpret ``dtype``.
+ If ``converters`` are specified, they will be applied INSTEAD
+ of ``dtype`` conversion.
+
+ .. versionadded:: 1.5.0
+
+ Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where
+ the default determines the ``dtype`` of the columns which are not explicitly
+ listed.
+engine : {{'c', 'python', 'pyarrow'}}, optional
+ Parser engine to use. The C and pyarrow engines are faster, while the python engine
+ is currently more feature-complete. Multithreading is currently only supported by
+ the pyarrow engine.
+
+ .. versionadded:: 1.4.0
+
+ The 'pyarrow' engine was added as an *experimental* engine, and some features
+ are unsupported, or may not work correctly, with this engine.
+converters : dict of {{Hashable : Callable}}, optional
+ Functions for converting values in specified columns. Keys can either
+ be column labels or column indices.
+true_values : list, optional
+ Values to consider as ``True`` in addition to case-insensitive variants of 'True'.
+false_values : list, optional
+ Values to consider as ``False`` in addition to case-insensitive variants of 'False'.
+skipinitialspace : bool, default False
+ Skip spaces after delimiter.
+skiprows : int, list of int or Callable, optional
+ Line numbers to skip (0-indexed) or number of lines to skip (``int``)
+ at the start of the file.
+
+ If callable, the callable function will be evaluated against the row
+ indices, returning ``True`` if the row should be skipped and ``False`` otherwise.
+ An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
+skipfooter : int, default 0
+ Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).
+nrows : int, optional
+ Number of rows of file to read. Useful for reading pieces of large files.
+na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional
+ Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific
+ per-column ``NA`` values. By default the following values are interpreted as
+ ``NaN``: " """
+ + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ + """ ".
+
+keep_default_na : bool, default True
+ Whether or not to include the default ``NaN`` values when parsing the data.
+ Depending on whether ``na_values`` is passed in, the behavior is as follows:
+
+ * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``
+ is appended to the default ``NaN`` values used for parsing.
+ * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only
+ the default ``NaN`` values are used for parsing.
+ * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only
+      the ``NaN`` values specified in ``na_values`` are used for parsing.
+ * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no
+ strings will be parsed as ``NaN``.
+
+ Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and
+ ``na_values`` parameters will be ignored.
+na_filter : bool, default True
+ Detect missing value markers (empty strings and the value of ``na_values``). In
+ data without any ``NA`` values, passing ``na_filter=False`` can improve the
+ performance of reading a large file.
+verbose : bool, default False
+ Indicate number of ``NA`` values placed in non-numeric columns.
+
+ .. deprecated:: 2.2.0
+skip_blank_lines : bool, default True
+ If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.
+parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \
+default False
+ The behavior is as follows:
+
+ * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to
+ ``True`` if ``date_format`` or ``date_parser`` arguments have been passed.
+ * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3
+ each as a separate date column.
+ * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse
+ as a single date column. Values are joined with a space before parsing.
+ * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call
+ result 'foo'. Values are joined with a space before parsing.
+
+ If a column or index cannot be represented as an array of ``datetime``,
+ say because of an unparsable value or a mixture of timezones, the column
+ or index will be returned unaltered as an ``object`` data type. For
+ non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after
+ :func:`~pandas.read_csv`.
+
+ Note: A fast-path exists for iso8601-formatted dates.
+infer_datetime_format : bool, default False
+ If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the
+ format of the ``datetime`` strings in the columns, and if it can be inferred,
+ switch to a faster method of parsing them. In some cases this can increase
+ the parsing speed by 5-10x.
+
+ .. deprecated:: 2.0.0
+        A strict version of this argument is now the default; passing it has no effect.
+
+keep_date_col : bool, default False
+ If ``True`` and ``parse_dates`` specifies combining multiple columns then
+ keep the original columns.
+date_parser : Callable, optional
+ Function to use for converting a sequence of string columns to an array of
+ ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the
+ conversion. pandas will try to call ``date_parser`` in three different ways,
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
+ (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the
+ string values from the columns defined by ``parse_dates`` into a single array
+ and pass that; and 3) call ``date_parser`` once for each row using one or
+ more strings (corresponding to the columns defined by ``parse_dates``) as
+ arguments.
+
+ .. deprecated:: 2.0.0
+ Use ``date_format`` instead, or read in as ``object`` and then apply
+ :func:`~pandas.to_datetime` as-needed.
+date_format : str or dict of column -> format, optional
+ Format to use for parsing dates when used in conjunction with ``parse_dates``.
+ The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
+ `strftime documentation
+    <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`_
+    for more information on choices, though
+ note that :const:`"%f"` will parse all the way up to nanoseconds.
+ You can also pass:
+
+ - "ISO8601", to parse any `ISO8601 `_
+ time string (not necessarily in exactly the same format);
+ - "mixed", to infer the format for each element individually. This is risky,
+ and you should probably use it along with `dayfirst`.
+
+ .. versionadded:: 2.0.0
+dayfirst : bool, default False
+ DD/MM format dates, international and European format.
+cache_dates : bool, default True
+ If ``True``, use a cache of unique, converted dates to apply the ``datetime``
+ conversion. May produce significant speed-up when parsing duplicate
+ date strings, especially ones with timezone offsets.
+
+iterator : bool, default False
+ Return ``TextFileReader`` object for iteration or getting chunks with
+ ``get_chunk()``.
+chunksize : int, optional
+ Number of lines to read from the file per chunk. Passing a value will cause the
+ function to return a ``TextFileReader`` object for iteration.
+ See the `IO Tools docs
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-chunking>`_
+ for more information on ``iterator`` and ``chunksize``.
+
+{decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+thousands : str (length 1), optional
+ Character acting as the thousands separator in numerical values.
+decimal : str (length 1), default '.'
+ Character to recognize as decimal point (e.g., use ',' for European data).
+lineterminator : str (length 1), optional
+ Character used to denote a line break. Only valid with C parser.
+quotechar : str (length 1), optional
+ Character used to denote the start and end of a quoted item. Quoted
+ items can include the ``delimiter`` and it will be ignored.
+quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \
+3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL
+ Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is
+ ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special
+ characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,
+    or ``lineterminator``).
+doublequote : bool, default True
+ When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate
+ whether or not to interpret two consecutive ``quotechar`` elements INSIDE a
+ field as a single ``quotechar`` element.
+escapechar : str (length 1), optional
+ Character used to escape other characters.
+comment : str (length 1), optional
+ Character indicating that the remainder of line should not be parsed.
+ If found at the beginning
+ of a line, the line will be ignored altogether. This parameter must be a
+ single character. Like empty lines (as long as ``skip_blank_lines=True``),
+ fully commented lines are ignored by the parameter ``header`` but not by
+ ``skiprows``. For example, if ``comment='#'``, parsing
+ ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being
+ treated as the header.
+encoding : str, optional, default 'utf-8'
+ Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python
+ standard encodings
+    <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
+
+encoding_errors : str, optional, default 'strict'
+ How encoding errors are treated. `List of possible values
+    <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
+
+ .. versionadded:: 1.3.0
+
+dialect : str or csv.Dialect, optional
+ If provided, this parameter will override values (default or not) for the
+ following parameters: ``delimiter``, ``doublequote``, ``escapechar``,
+ ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to
+ override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``
+ documentation for more details.
+on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error'
+ Specifies what to do upon encountering a bad line (a line with too many fields).
+ Allowed values are :
+
+ - ``'error'``, raise an Exception when a bad line is encountered.
+ - ``'warn'``, raise a warning when a bad line is encountered and skip that line.
+ - ``'skip'``, skip bad lines without raising or warning when they are encountered.
+
+ .. versionadded:: 1.3.0
+
+ .. versionadded:: 1.4.0
+
+ - Callable, function with signature
+ ``(bad_line: list[str]) -> list[str] | None`` that will process a single
+ bad line. ``bad_line`` is a list of strings split by the ``sep``.
+ If the function returns ``None``, the bad line will be ignored.
+ If the function returns a new ``list`` of strings with more elements than
+ expected, a ``ParserWarning`` will be emitted while dropping extra elements.
+ Only supported when ``engine='python'``
+
+ .. versionchanged:: 2.2.0
+
+ - Callable, function with signature
+ as described in `pyarrow documentation
+      <https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html#pyarrow.csv.ParseOptions.invalid_row_handler>`_
+      when ``engine='pyarrow'``
+
+delim_whitespace : bool, default False
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be
+ used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option
+ is set to ``True``, nothing should be passed in for the ``delimiter``
+ parameter.
+
+ .. deprecated:: 2.2.0
+ Use ``sep="\\s+"`` instead.
+low_memory : bool, default True
+ Internally process the file in chunks, resulting in lower memory use
+ while parsing, but possibly mixed type inference. To ensure no mixed
+ types either set ``False``, or specify the type with the ``dtype`` parameter.
+ Note that the entire file is read into a single :class:`~pandas.DataFrame`
+ regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in
+ chunks. (Only valid with C parser).
+memory_map : bool, default False
+ If a filepath is provided for ``filepath_or_buffer``, map the file object
+ directly onto memory and access the data directly from there. Using this
+ option can improve performance because there is no longer any I/O overhead.
+float_precision : {{'high', 'legacy', 'round_trip'}}, optional
+ Specifies which converter the C engine should use for floating-point
+ values. The options are ``None`` or ``'high'`` for the ordinary converter,
+ ``'legacy'`` for the original lower precision pandas converter, and
+ ``'round_trip'`` for the round-trip converter.
+
+{storage_options}
+
+dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+Returns
+-------
+DataFrame or TextFileReader
+    A comma-separated values (csv) file is returned as a two-dimensional
+    data structure with labeled axes.
+
+See Also
+--------
+DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+{see_also_func_name} : {see_also_func_summary}
+read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+Examples
+--------
+>>> pd.{func_name}('data.csv') # doctest: +SKIP
+"""
+)
+
+
+class _C_Parser_Defaults(TypedDict):
+ delim_whitespace: Literal[False]
+ na_filter: Literal[True]
+ low_memory: Literal[True]
+ memory_map: Literal[False]
+ float_precision: None
+
+
+_c_parser_defaults: _C_Parser_Defaults = {
+ "delim_whitespace": False,
+ "na_filter": True,
+ "low_memory": True,
+ "memory_map": False,
+ "float_precision": None,
+}
+
+
+class _Fwf_Defaults(TypedDict):
+ colspecs: Literal["infer"]
+ infer_nrows: Literal[100]
+ widths: None
+
+
+_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
+_c_unsupported = {"skipfooter"}
+_python_unsupported = {"low_memory", "float_precision"}
+_pyarrow_unsupported = {
+ "skipfooter",
+ "float_precision",
+ "chunksize",
+ "comment",
+ "nrows",
+ "thousands",
+ "memory_map",
+ "dialect",
+ "delim_whitespace",
+ "quoting",
+ "lineterminator",
+ "converters",
+ "iterator",
+ "dayfirst",
+ "verbose",
+ "skipinitialspace",
+ "low_memory",
+}
+
+
+class _DeprecationConfig(NamedTuple):
+ default_value: Any
+ msg: str | None
+
+
+@overload
+def validate_integer(name: str, val: None, min_val: int = ...) -> None:
+ ...
+
+
+@overload
+def validate_integer(name: str, val: float, min_val: int = ...) -> int:
+ ...
+
+
+@overload
+def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None:
+ ...
+
+
+def validate_integer(
+ name: str, val: int | float | None, min_val: int = 0
+) -> int | None:
+ """
+ Checks whether the 'name' parameter for parsing is either
+ an integer OR float that can SAFELY be cast to an integer
+ without losing accuracy. Raises a ValueError if that is
+ not the case.
+
+ Parameters
+ ----------
+ name : str
+ Parameter name (used for error reporting)
+ val : int or float
+ The value to check
+ min_val : int
+ Minimum allowed value (val < min_val will result in a ValueError)
+ """
+ if val is None:
+ return val
+
+ msg = f"'{name:s}' must be an integer >={min_val:d}"
+ if is_float(val):
+ if int(val) != val:
+ raise ValueError(msg)
+ val = int(val)
+ elif not (is_integer(val) and val >= min_val):
+ raise ValueError(msg)
+
+ return int(val)
+
+
+def _validate_names(names: Sequence[Hashable] | None) -> None:
+ """
+ Raise ValueError if the `names` parameter contains duplicates or has an
+ invalid data type.
+
+ Parameters
+ ----------
+ names : array-like or None
+ An array containing a list of the names used for the output DataFrame.
+
+ Raises
+ ------
+ ValueError
+ If names are not unique or are not ordered (e.g. set).
+ """
+ if names is not None:
+ if len(names) != len(set(names)):
+ raise ValueError("Duplicate names are not allowed.")
+ if not (
+ is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
+ ):
+ raise ValueError("Names should be an ordered collection.")
+
+
+def _read(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
+) -> DataFrame | TextFileReader:
+ """Generic reader of line files."""
+ # if we pass a date_parser and parse_dates=False, we should not parse the
+ # dates GH#44366
+ if kwds.get("parse_dates", None) is None:
+ if (
+ kwds.get("date_parser", lib.no_default) is lib.no_default
+ and kwds.get("date_format", None) is None
+ ):
+ kwds["parse_dates"] = False
+ else:
+ kwds["parse_dates"] = True
+
+ # Extract some of the arguments (pass chunksize on).
+ iterator = kwds.get("iterator", False)
+ chunksize = kwds.get("chunksize", None)
+ if kwds.get("engine") == "pyarrow":
+ if iterator:
+ raise ValueError(
+ "The 'iterator' option is not supported with the 'pyarrow' engine"
+ )
+
+ if chunksize is not None:
+ raise ValueError(
+ "The 'chunksize' option is not supported with the 'pyarrow' engine"
+ )
+ else:
+ chunksize = validate_integer("chunksize", chunksize, 1)
+
+ nrows = kwds.get("nrows", None)
+
+ # Check for duplicates in names.
+ _validate_names(kwds.get("names", None))
+
+ # Create the parser.
+ parser = TextFileReader(filepath_or_buffer, **kwds)
+
+ if chunksize or iterator:
+ return parser
+
+ with parser:
+ return parser.read(nrows)
+
+
+# iterator=True -> TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# chunksize=int -> TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# default case -> DataFrame
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+# Unions -> DataFrame | TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame | TextFileReader:
+ ...
+
+
+@Appender(
+ _doc_read_csv_and_table.format(
+ func_name="read_csv",
+ summary="Read a comma-separated values (csv) file into DataFrame.",
+ see_also_func_name="read_table",
+ see_also_func_summary="Read general delimited file into DataFrame.",
+ _default_sep="','",
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
+ )
+)
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = lib.no_default,
+ delimiter: str | None | lib.NoDefault = None,
+ # Column and Index Locations and Names
+ header: int | Sequence[int] | None | Literal["infer"] = "infer",
+ names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
+ index_col: IndexLabel | Literal[False] | None = None,
+ usecols: UsecolsArgType = None,
+ # General Parsing Configuration
+ dtype: DtypeArg | None = None,
+ engine: CSVEngine | None = None,
+ converters: Mapping[Hashable, Callable] | None = None,
+ true_values: list | None = None,
+ false_values: list | None = None,
+ skipinitialspace: bool = False,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
+ skipfooter: int = 0,
+ nrows: int | None = None,
+ # NA and Missing Data Handling
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool | lib.NoDefault = lib.no_default,
+ skip_blank_lines: bool = True,
+ # Datetime Handling
+ parse_dates: bool | Sequence[Hashable] | None = None,
+ infer_datetime_format: bool | lib.NoDefault = lib.no_default,
+ keep_date_col: bool | lib.NoDefault = lib.no_default,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ # Iteration
+ iterator: bool = False,
+ chunksize: int | None = None,
+ # Quoting, Compression, and File Format
+ compression: CompressionOptions = "infer",
+ thousands: str | None = None,
+ decimal: str = ".",
+ lineterminator: str | None = None,
+ quotechar: str = '"',
+ quoting: int = csv.QUOTE_MINIMAL,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ comment: str | None = None,
+ encoding: str | None = None,
+ encoding_errors: str | None = "strict",
+ dialect: str | csv.Dialect | None = None,
+ # Error Handling
+ on_bad_lines: str = "error",
+ # Internal
+ delim_whitespace: bool | lib.NoDefault = lib.no_default,
+ low_memory: bool = _c_parser_defaults["low_memory"],
+ memory_map: bool = False,
+ float_precision: Literal["high", "legacy"] | None = None,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | TextFileReader:
+ if keep_date_col is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'keep_date_col' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version. Explicitly remove unwanted "
+ "columns after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ keep_date_col = False
+
+ if lib.is_list_like(parse_dates):
+ # GH#55569
+ depr = False
+ # error: Item "bool" of "bool | Sequence[Hashable] | None" has no
+ # attribute "__iter__" (not iterable)
+ if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
+ depr = True
+ elif isinstance(parse_dates, dict) and any(
+ lib.is_list_like(x) for x in parse_dates.values()
+ ):
+ depr = True
+ if depr:
+ warnings.warn(
+ "Support for nested sequences for 'parse_dates' in pd.read_csv "
+ "is deprecated. Combine the desired columns with pd.to_datetime "
+ "after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if infer_datetime_format is not lib.no_default:
+ warnings.warn(
+ "The argument 'infer_datetime_format' is deprecated and will "
+ "be removed in a future version. "
+ "A strict version of it is now the default, see "
+ "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
+ "You can safely remove this argument.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if delim_whitespace is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'delim_whitespace' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version. Use ``sep='\\s+'`` instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ delim_whitespace = False
+
+ if verbose is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'verbose' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ verbose = False
+
+ # locals() should never be modified
+ kwds = locals().copy()
+ del kwds["filepath_or_buffer"]
+ del kwds["sep"]
+
+ kwds_defaults = _refine_defaults_read(
+ dialect,
+ delimiter,
+ delim_whitespace,
+ engine,
+ sep,
+ on_bad_lines,
+ names,
+ defaults={"delimiter": ","},
+ dtype_backend=dtype_backend,
+ )
+ kwds.update(kwds_defaults)
+
+ return _read(filepath_or_buffer, kwds)
+
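+# Editor's note (illustrative sketch, not part of pandas; file names below are
+# placeholders): the overloads above encode the return type: a plain call
+# yields a DataFrame, while ``iterator=True`` or an integer ``chunksize``
+# yields a TextFileReader. The deprecations handled in the body have
+# documented replacements, e.g.:
+#
+#   import pandas as pd
+#   df = pd.read_csv("data.txt", sep=r"\s+")            # replaces delim_whitespace=True
+#   reader = pd.read_csv("data.csv", chunksize=10_000)  # TextFileReader of DataFrames
+#   for chunk in reader:
+#       ...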
+
+# iterator=True -> TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# chunksize=int -> TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# default -> DataFrame
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+# Unions -> DataFrame | TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame | TextFileReader:
+ ...
+
+
+@Appender(
+ _doc_read_csv_and_table.format(
+ func_name="read_table",
+ summary="Read general delimited file into DataFrame.",
+ see_also_func_name="read_csv",
+ see_also_func_summary=(
+ "Read a comma-separated values (csv) file into DataFrame."
+ ),
+ _default_sep=r"'\\t' (tab-stop)",
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
+ )
+)
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = lib.no_default,
+ delimiter: str | None | lib.NoDefault = None,
+ # Column and Index Locations and Names
+ header: int | Sequence[int] | None | Literal["infer"] = "infer",
+ names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
+ index_col: IndexLabel | Literal[False] | None = None,
+ usecols: UsecolsArgType = None,
+ # General Parsing Configuration
+ dtype: DtypeArg | None = None,
+ engine: CSVEngine | None = None,
+ converters: Mapping[Hashable, Callable] | None = None,
+ true_values: list | None = None,
+ false_values: list | None = None,
+ skipinitialspace: bool = False,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
+ skipfooter: int = 0,
+ nrows: int | None = None,
+ # NA and Missing Data Handling
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool | lib.NoDefault = lib.no_default,
+ skip_blank_lines: bool = True,
+ # Datetime Handling
+ parse_dates: bool | Sequence[Hashable] = False,
+ infer_datetime_format: bool | lib.NoDefault = lib.no_default,
+ keep_date_col: bool | lib.NoDefault = lib.no_default,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ # Iteration
+ iterator: bool = False,
+ chunksize: int | None = None,
+ # Quoting, Compression, and File Format
+ compression: CompressionOptions = "infer",
+ thousands: str | None = None,
+ decimal: str = ".",
+ lineterminator: str | None = None,
+ quotechar: str = '"',
+ quoting: int = csv.QUOTE_MINIMAL,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ comment: str | None = None,
+ encoding: str | None = None,
+ encoding_errors: str | None = "strict",
+ dialect: str | csv.Dialect | None = None,
+ # Error Handling
+ on_bad_lines: str = "error",
+ # Internal
+ delim_whitespace: bool | lib.NoDefault = lib.no_default,
+ low_memory: bool = _c_parser_defaults["low_memory"],
+ memory_map: bool = False,
+ float_precision: str | None = None,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | TextFileReader:
+ if keep_date_col is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'keep_date_col' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version. Explicitly remove unwanted "
+ "columns after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ keep_date_col = False
+
+ # error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__"
+ if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
+ # GH#55569
+ warnings.warn(
+ "Support for nested sequences for 'parse_dates' in pd.read_table "
+ "is deprecated. Combine the desired columns with pd.to_datetime "
+ "after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if infer_datetime_format is not lib.no_default:
+ warnings.warn(
+ "The argument 'infer_datetime_format' is deprecated and will "
+ "be removed in a future version. "
+ "A strict version of it is now the default, see "
+ "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
+ "You can safely remove this argument.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if delim_whitespace is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'delim_whitespace' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version. Use ``sep='\\s+'`` instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ delim_whitespace = False
+
+ if verbose is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'verbose' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ verbose = False
+
+ # locals() should never be modified
+ kwds = locals().copy()
+ del kwds["filepath_or_buffer"]
+ del kwds["sep"]
+
+ kwds_defaults = _refine_defaults_read(
+ dialect,
+ delimiter,
+ delim_whitespace,
+ engine,
+ sep,
+ on_bad_lines,
+ names,
+ defaults={"delimiter": "\t"},
+ dtype_backend=dtype_backend,
+ )
+ kwds.update(kwds_defaults)
+
+ return _read(filepath_or_buffer, kwds)
+
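+# Editor's note (illustrative sketch, not part of pandas; "data.tsv" is a
+# placeholder): read_table is read_csv with a tab as the default separator,
+# so the two calls below are expected to be equivalent:
+#
+#   import pandas as pd
+#   df1 = pd.read_table("data.tsv")
+#   df2 = pd.read_csv("data.tsv", sep="\t")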
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ **kwds,
+) -> TextFileReader:
+ ...
+
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ **kwds,
+) -> TextFileReader:
+ ...
+
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ **kwds,
+) -> DataFrame:
+ ...
+
+
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = "infer",
+ widths: Sequence[int] | None = None,
+ infer_nrows: int = 100,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ **kwds,
+) -> DataFrame | TextFileReader:
+ r"""
+ Read a table of fixed-width formatted lines into DataFrame.
+
+ Also supports optionally iterating or breaking of the file
+ into chunks.
+
+    Additional help can be found in the `online docs for IO Tools
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+        object implementing a text ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.csv``.
+    colspecs : list of tuple (int, int) or 'infer', optional
+ A list of tuples giving the extents of the fixed-width
+ fields of each line as half-open intervals (i.e., [from, to[ ).
+ String value 'infer' can be used to instruct the parser to try
+ detecting the column specifications from the first 100 rows of
+ the data which are not being skipped via skiprows (default='infer').
+ widths : list of int, optional
+ A list of field widths which can be used instead of 'colspecs' if
+ the intervals are contiguous.
+ infer_nrows : int, default 100
+ The number of rows to consider when letting the parser determine the
+ `colspecs`.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ **kwds : optional
+ Optional keyword arguments can be passed to ``TextFileReader``.
+
+ Returns
+ -------
+ DataFrame or TextFileReader
+        A fixed-width formatted file is returned as a two-dimensional data
+        structure with labeled axes.
+
+ See Also
+ --------
+ DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
+
+ Examples
+ --------
+ >>> pd.read_fwf('data.csv') # doctest: +SKIP
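+
+    Column extents can also be given explicitly; an illustrative sketch in
+    which ``data.txt`` is a placeholder fixed-width file:
+
+    >>> pd.read_fwf('data.txt', widths=[5, 3, 8])  # doctest: +SKIP
+    >>> pd.read_fwf('data.txt', colspecs=[(0, 5), (5, 8), (8, 16)])  # doctest: +SKIP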
+ """
+ # Check input arguments.
+ if colspecs is None and widths is None:
+ raise ValueError("Must specify either colspecs or widths")
+ if colspecs not in (None, "infer") and widths is not None:
+ raise ValueError("You must specify only one of 'widths' and 'colspecs'")
+
+ # Compute 'colspecs' from 'widths', if specified.
+ if widths is not None:
+ colspecs, col = [], 0
+ for w in widths:
+ colspecs.append((col, col + w))
+ col += w
+
+ # for mypy
+ assert colspecs is not None
+
+ # GH#40830
+ # Ensure length of `colspecs` matches length of `names`
+ names = kwds.get("names")
+ if names is not None:
+ if len(names) != len(colspecs) and colspecs != "infer":
+ # need to check len(index_col) as it might contain
+            # unnamed indices, in which case its name is not required
+ len_index = 0
+ if kwds.get("index_col") is not None:
+ index_col: Any = kwds.get("index_col")
+ if index_col is not False:
+ if not is_list_like(index_col):
+ len_index = 1
+ else:
+ len_index = len(index_col)
+ if kwds.get("usecols") is None and len(names) + len_index != len(colspecs):
+ # If usecols is used colspec may be longer than names
+ raise ValueError("Length of colspecs must match length of names")
+
+ kwds["colspecs"] = colspecs
+ kwds["infer_nrows"] = infer_nrows
+ kwds["engine"] = "python-fwf"
+ kwds["iterator"] = iterator
+ kwds["chunksize"] = chunksize
+
+ check_dtype_backend(dtype_backend)
+ kwds["dtype_backend"] = dtype_backend
+ return _read(filepath_or_buffer, kwds)
+
+
+class TextFileReader(abc.Iterator):
+ """
+
+ Passed dialect overrides any of the related parser options
+
+ """
+
+ def __init__(
+ self,
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,
+ engine: CSVEngine | None = None,
+ **kwds,
+ ) -> None:
+ if engine is not None:
+ engine_specified = True
+ else:
+ engine = "python"
+ engine_specified = False
+ self.engine = engine
+ self._engine_specified = kwds.get("engine_specified", engine_specified)
+
+ _validate_skipfooter(kwds)
+
+ dialect = _extract_dialect(kwds)
+ if dialect is not None:
+ if engine == "pyarrow":
+ raise ValueError(
+ "The 'dialect' option is not supported with the 'pyarrow' engine"
+ )
+ kwds = _merge_with_dialect_properties(dialect, kwds)
+
+ if kwds.get("header", "infer") == "infer":
+ kwds["header"] = 0 if kwds.get("names") is None else None
+
+ self.orig_options = kwds
+
+ # miscellanea
+ self._currow = 0
+
+ options = self._get_options_with_defaults(engine)
+ options["storage_options"] = kwds.get("storage_options", None)
+
+ self.chunksize = options.pop("chunksize", None)
+ self.nrows = options.pop("nrows", None)
+
+ self._check_file_or_buffer(f, engine)
+ self.options, self.engine = self._clean_options(options, engine)
+
+ if "has_index_names" in kwds:
+ self.options["has_index_names"] = kwds["has_index_names"]
+
+ self.handles: IOHandles | None = None
+ self._engine = self._make_engine(f, self.engine)
+
+ def close(self) -> None:
+ if self.handles is not None:
+ self.handles.close()
+ self._engine.close()
+
+ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:
+ kwds = self.orig_options
+
+ options = {}
+ default: object | None
+
+ for argname, default in parser_defaults.items():
+ value = kwds.get(argname, default)
+
+ # see gh-12935
+ if (
+ engine == "pyarrow"
+ and argname in _pyarrow_unsupported
+ and value != default
+ and value != getattr(value, "value", default)
+ ):
+ raise ValueError(
+ f"The {repr(argname)} option is not supported with the "
+ f"'pyarrow' engine"
+ )
+ options[argname] = value
+
+ for argname, default in _c_parser_defaults.items():
+ if argname in kwds:
+ value = kwds[argname]
+
+ if engine != "c" and value != default:
+                # TODO: Refactor this logic, it's pretty convoluted
+ if "python" in engine and argname not in _python_unsupported:
+ pass
+ elif "pyarrow" in engine and argname not in _pyarrow_unsupported:
+ pass
+ else:
+ raise ValueError(
+ f"The {repr(argname)} option is not supported with the "
+ f"{repr(engine)} engine"
+ )
+ else:
+ value = default
+ options[argname] = value
+
+ if engine == "python-fwf":
+ for argname, default in _fwf_defaults.items():
+ options[argname] = kwds.get(argname, default)
+
+ return options
+
+ def _check_file_or_buffer(self, f, engine: CSVEngine) -> None:
+ # see gh-16530
+ if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):
+ # The C engine doesn't need the file-like to have the "__iter__"
+ # attribute. However, the Python engine needs "__iter__(...)"
+ # when iterating through such an object, meaning it
+ # needs to have that attribute
+ raise ValueError(
+ "The 'python' engine cannot iterate through this file buffer."
+ )
+
+ def _clean_options(
+ self, options: dict[str, Any], engine: CSVEngine
+ ) -> tuple[dict[str, Any], CSVEngine]:
+ result = options.copy()
+
+ fallback_reason = None
+
+ # C engine not supported yet
+ if engine == "c":
+ if options["skipfooter"] > 0:
+ fallback_reason = "the 'c' engine does not support skipfooter"
+ engine = "python"
+
+ sep = options["delimiter"]
+ delim_whitespace = options["delim_whitespace"]
+
+ if sep is None and not delim_whitespace:
+ if engine in ("c", "pyarrow"):
+ fallback_reason = (
+ f"the '{engine}' engine does not support "
+ "sep=None with delim_whitespace=False"
+ )
+ engine = "python"
+ elif sep is not None and len(sep) > 1:
+ if engine == "c" and sep == r"\s+":
+ result["delim_whitespace"] = True
+ del result["delimiter"]
+ elif engine not in ("python", "python-fwf"):
+ # wait until regex engine integrated
+ fallback_reason = (
+ f"the '{engine}' engine does not support "
+ "regex separators (separators > 1 char and "
+ r"different from '\s+' are interpreted as regex)"
+ )
+ engine = "python"
+ elif delim_whitespace:
+ if "python" in engine:
+ result["delimiter"] = r"\s+"
+ elif sep is not None:
+ encodeable = True
+ encoding = sys.getfilesystemencoding() or "utf-8"
+ try:
+ if len(sep.encode(encoding)) > 1:
+ encodeable = False
+ except UnicodeDecodeError:
+ encodeable = False
+ if not encodeable and engine not in ("python", "python-fwf"):
+ fallback_reason = (
+ f"the separator encoded in {encoding} "
+ f"is > 1 char long, and the '{engine}' engine "
+ "does not support such separators"
+ )
+ engine = "python"
+
+ quotechar = options["quotechar"]
+ if quotechar is not None and isinstance(quotechar, (str, bytes)):
+ if (
+ len(quotechar) == 1
+ and ord(quotechar) > 127
+ and engine not in ("python", "python-fwf")
+ ):
+ fallback_reason = (
+ "ord(quotechar) > 127, meaning the "
+ "quotechar is larger than one byte, "
+ f"and the '{engine}' engine does not support such quotechars"
+ )
+ engine = "python"
+
+ if fallback_reason and self._engine_specified:
+ raise ValueError(fallback_reason)
+
+ if engine == "c":
+ for arg in _c_unsupported:
+ del result[arg]
+
+ if "python" in engine:
+ for arg in _python_unsupported:
+ if fallback_reason and result[arg] != _c_parser_defaults.get(arg):
+ raise ValueError(
+ "Falling back to the 'python' engine because "
+ f"{fallback_reason}, but this causes {repr(arg)} to be "
+ "ignored as it is not supported by the 'python' engine."
+ )
+ del result[arg]
+
+ if fallback_reason:
+ warnings.warn(
+ (
+ "Falling back to the 'python' engine because "
+ f"{fallback_reason}; you can avoid this warning by specifying "
+ "engine='python'."
+ ),
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ index_col = options["index_col"]
+ names = options["names"]
+ converters = options["converters"]
+ na_values = options["na_values"]
+ skiprows = options["skiprows"]
+
+ validate_header_arg(options["header"])
+
+ if index_col is True:
+ raise ValueError("The value of index_col couldn't be 'True'")
+ if is_index_col(index_col):
+ if not isinstance(index_col, (list, tuple, np.ndarray)):
+ index_col = [index_col]
+ result["index_col"] = index_col
+
+ names = list(names) if names is not None else names
+
+ # type conversion-related
+ if converters is not None:
+ if not isinstance(converters, dict):
+ raise TypeError(
+ "Type converters must be a dict or subclass, "
+ f"input was a {type(converters).__name__}"
+ )
+ else:
+ converters = {}
+
+ # Converting values to NA
+ keep_default_na = options["keep_default_na"]
+ floatify = engine != "pyarrow"
+ na_values, na_fvalues = _clean_na_values(
+ na_values, keep_default_na, floatify=floatify
+ )
+
+ # handle skiprows; this is internally handled by the
+        # c-engine, so it is only needed for the python and pyarrow parsers
+ if engine == "pyarrow":
+ if not is_integer(skiprows) and skiprows is not None:
+ # pyarrow expects skiprows to be passed as an integer
+ raise ValueError(
+ "skiprows argument must be an integer when using "
+ "engine='pyarrow'"
+ )
+ else:
+ if is_integer(skiprows):
+ skiprows = list(range(skiprows))
+ if skiprows is None:
+ skiprows = set()
+ elif not callable(skiprows):
+ skiprows = set(skiprows)
+
+ # put stuff back
+ result["names"] = names
+ result["converters"] = converters
+ result["na_values"] = na_values
+ result["na_fvalues"] = na_fvalues
+ result["skiprows"] = skiprows
+
+ return result, engine
+
+ def __next__(self) -> DataFrame:
+ try:
+ return self.get_chunk()
+ except StopIteration:
+ self.close()
+ raise
+
+ def _make_engine(
+ self,
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO,
+ engine: CSVEngine = "c",
+ ) -> ParserBase:
+ mapping: dict[str, type[ParserBase]] = {
+ "c": CParserWrapper,
+ "python": PythonParser,
+ "pyarrow": ArrowParserWrapper,
+ "python-fwf": FixedWidthFieldParser,
+ }
+ if engine not in mapping:
+ raise ValueError(
+ f"Unknown engine: {engine} (valid options are {mapping.keys()})"
+ )
+ if not isinstance(f, list):
+ # open file here
+ is_text = True
+ mode = "r"
+ if engine == "pyarrow":
+ is_text = False
+ mode = "rb"
+ elif (
+ engine == "c"
+ and self.options.get("encoding", "utf-8") == "utf-8"
+ and isinstance(stringify_path(f), str)
+ ):
+                # the c engine can decode utf-8 bytes; adding a TextIOWrapper
+                # makes the c engine far slower, especially for memory_map=True
+ is_text = False
+ if "b" not in mode:
+ mode += "b"
+ self.handles = get_handle(
+ f,
+ mode,
+ encoding=self.options.get("encoding", None),
+ compression=self.options.get("compression", None),
+ memory_map=self.options.get("memory_map", False),
+ is_text=is_text,
+ errors=self.options.get("encoding_errors", "strict"),
+ storage_options=self.options.get("storage_options", None),
+ )
+ assert self.handles is not None
+ f = self.handles.handle
+
+ elif engine != "python":
+ msg = f"Invalid file path or buffer object type: {type(f)}"
+ raise ValueError(msg)
+
+ try:
+ return mapping[engine](f, **self.options)
+ except Exception:
+ if self.handles is not None:
+ self.handles.close()
+ raise
+
+ def _failover_to_python(self) -> None:
+ raise AbstractMethodError(self)
+
+ def read(self, nrows: int | None = None) -> DataFrame:
+ if self.engine == "pyarrow":
+ try:
+ # error: "ParserBase" has no attribute "read"
+ df = self._engine.read() # type: ignore[attr-defined]
+ except Exception:
+ self.close()
+ raise
+ else:
+ nrows = validate_integer("nrows", nrows)
+ try:
+ # error: "ParserBase" has no attribute "read"
+ (
+ index,
+ columns,
+ col_dict,
+ ) = self._engine.read( # type: ignore[attr-defined]
+ nrows
+ )
+ except Exception:
+ self.close()
+ raise
+
+ if index is None:
+ if col_dict:
+ # Any column is actually fine:
+ new_rows = len(next(iter(col_dict.values())))
+ index = RangeIndex(self._currow, self._currow + new_rows)
+ else:
+ new_rows = 0
+ else:
+ new_rows = len(index)
+
+ if hasattr(self, "orig_options"):
+ dtype_arg = self.orig_options.get("dtype", None)
+ else:
+ dtype_arg = None
+
+ if isinstance(dtype_arg, dict):
+ dtype = defaultdict(lambda: None) # type: ignore[var-annotated]
+ dtype.update(dtype_arg)
+ elif dtype_arg is not None and pandas_dtype(dtype_arg) in (
+ np.str_,
+ np.object_,
+ ):
+ dtype = defaultdict(lambda: dtype_arg)
+ else:
+ dtype = None
+
+ if dtype is not None:
+ new_col_dict = {}
+ for k, v in col_dict.items():
+ d = (
+ dtype[k]
+ if pandas_dtype(dtype[k]) in (np.str_, np.object_)
+ else None
+ )
+ new_col_dict[k] = Series(v, index=index, dtype=d, copy=False)
+ else:
+ new_col_dict = col_dict
+
+ df = DataFrame(
+ new_col_dict,
+ columns=columns,
+ index=index,
+ copy=not using_copy_on_write(),
+ )
+
+ self._currow += new_rows
+ return df
+
+ def get_chunk(self, size: int | None = None) -> DataFrame:
+ if size is None:
+ size = self.chunksize
+ if self.nrows is not None:
+ if self._currow >= self.nrows:
+ raise StopIteration
+ size = min(size, self.nrows - self._currow)
+ return self.read(nrows=size)
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
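+# Editor's note (illustrative sketch, not part of pandas; "big.csv" is a
+# placeholder): a TextFileReader is both an iterator and a context manager,
+# so chunked reads can be written as:
+#
+#   import pandas as pd
+#   with pd.read_csv("big.csv", chunksize=100_000) as reader:
+#       for chunk in reader:          # each chunk is a DataFrame
+#           ...
+#       # reader.get_chunk(10) pulls an explicitly sized chunk instead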
+
+def TextParser(*args, **kwds) -> TextFileReader:
+ """
+ Converts lists of lists/tuples into DataFrames with proper type inference
+ and optional (e.g. string to datetime) conversion. Also enables iterating
+    lazily over chunks of large files.
+
+ Parameters
+ ----------
+ data : file-like object or list
+ delimiter : separator character to use
+ dialect : str or csv.Dialect instance, optional
+ Ignored if delimiter is longer than 1 character
+    names : sequence, optional
+ header : int, default 0
+ Row to use to parse column labels. Defaults to the first row. Prior
+ rows will be discarded
+ index_col : int or list, optional
+ Column or columns to use as the (possibly hierarchical) index
+    has_index_names : bool, default False
+ True if the cols defined in index_col have an index name and are
+ not in the header.
+ na_values : scalar, str, list-like, or dict, optional
+ Additional strings to recognize as NA/NaN.
+ keep_default_na : bool, default True
+ thousands : str, optional
+ Thousands separator
+ comment : str, optional
+ Comment out remainder of line
+ parse_dates : bool, default False
+ keep_date_col : bool, default False
+ date_parser : function, optional
+
+ .. deprecated:: 2.0.0
+ date_format : str or dict of column -> format, default ``None``
+
+ .. versionadded:: 2.0.0
+ skiprows : list of integers
+ Row numbers to skip
+ skipfooter : int
+ Number of line at bottom of file to skip
+ converters : dict, optional
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels, values are functions that take one
+ input argument, the cell (not column) content, and return the
+ transformed content.
+ encoding : str, optional
+ Encoding to use for UTF when reading/writing (ex. 'utf-8')
+ float_precision : str, optional
+ Specifies which converter the C engine should use for floating-point
+ values. The options are `None` or `high` for the ordinary converter,
+ `legacy` for the original lower precision pandas converter, and
+ `round_trip` for the round-trip converter.
+ """
+ kwds["engine"] = "python"
+ return TextFileReader(*args, **kwds)
+
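+# Editor's note (illustrative sketch, not part of pandas): TextParser wraps
+# the python engine around rows that are already in memory, e.g. with the
+# first row used as the header:
+#
+#   from pandas.io.parsers import TextParser
+#   rows = [["a", "b"], [1, 2], [3, 4]]
+#   df = TextParser(rows, header=0).read()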
+
+def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True):
+ na_fvalues: set | dict
+ if na_values is None:
+ if keep_default_na:
+ na_values = STR_NA_VALUES
+ else:
+ na_values = set()
+ na_fvalues = set()
+ elif isinstance(na_values, dict):
+ old_na_values = na_values.copy()
+ na_values = {} # Prevent aliasing.
+
+ # Convert the values in the na_values dictionary
+ # into array-likes for further use. This is also
+ # where we append the default NaN values, provided
+ # that `keep_default_na=True`.
+ for k, v in old_na_values.items():
+ if not is_list_like(v):
+ v = [v]
+
+ if keep_default_na:
+ v = set(v) | STR_NA_VALUES
+
+ na_values[k] = v
+ na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
+ else:
+ if not is_list_like(na_values):
+ na_values = [na_values]
+ na_values = _stringify_na_values(na_values, floatify)
+ if keep_default_na:
+ na_values = na_values | STR_NA_VALUES
+
+ na_fvalues = _floatify_na_values(na_values)
+
+ return na_values, na_fvalues
+
+
+def _floatify_na_values(na_values):
+ # create float versions of the na_values
+ result = set()
+ for v in na_values:
+ try:
+ v = float(v)
+ if not np.isnan(v):
+ result.add(v)
+ except (TypeError, ValueError, OverflowError):
+ pass
+ return result
+
+
+def _stringify_na_values(na_values, floatify: bool):
+ """return a stringified and numeric for these values"""
+ result: list[str | float] = []
+ for x in na_values:
+ result.append(str(x))
+ result.append(x)
+ try:
+ v = float(x)
+
+            # for an integral value like 999, also record "999.0" and "999"
+ if v == int(v):
+ v = int(v)
+ result.append(f"{v}.0")
+ result.append(str(v))
+
+ if floatify:
+ result.append(v)
+ except (TypeError, ValueError, OverflowError):
+ pass
+ if floatify:
+ try:
+ result.append(int(x))
+ except (TypeError, ValueError, OverflowError):
+ pass
+ return set(result)
+
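+# Editor's note (illustrative sketch, not part of pandas): the helpers above
+# expand each user-supplied NA marker into its string and numeric spellings,
+# e.g. _stringify_na_values([999], floatify=True) is expected to contain
+# "999", "999.0" and the number 999, so any of those spellings in a parsed
+# column is recognised as missing.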
+
+def _refine_defaults_read(
+ dialect: str | csv.Dialect | None,
+ delimiter: str | None | lib.NoDefault,
+ delim_whitespace: bool,
+ engine: CSVEngine | None,
+ sep: str | None | lib.NoDefault,
+ on_bad_lines: str | Callable,
+ names: Sequence[Hashable] | None | lib.NoDefault,
+ defaults: dict[str, Any],
+ dtype_backend: DtypeBackend | lib.NoDefault,
+):
+ """Validate/refine default values of input parameters of read_csv, read_table.
+
+ Parameters
+ ----------
+ dialect : str or csv.Dialect
+ If provided, this parameter will override values (default or not) for the
+ following parameters: `delimiter`, `doublequote`, `escapechar`,
+ `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
+ override values, a ParserWarning will be issued. See csv.Dialect
+ documentation for more details.
+ delimiter : str or object
+ Alias for sep.
+ delim_whitespace : bool
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
+ used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
+ is set to True, nothing should be passed in for the ``delimiter``
+ parameter.
+
+ .. deprecated:: 2.2.0
+ Use ``sep="\\s+"`` instead.
+ engine : {{'c', 'python'}}
+ Parser engine to use. The C engine is faster while the python engine is
+ currently more feature-complete.
+ sep : str or object
+ A delimiter provided by the user (str) or a sentinel value, i.e.
+ pandas._libs.lib.no_default.
+ on_bad_lines : str, callable
+        An option for handling bad lines or a sentinel value (None).
+ names : array-like, optional
+ List of column names to use. If the file contains a header row,
+ then you should explicitly pass ``header=0`` to override the column names.
+ Duplicates in this list are not allowed.
+    defaults : dict
+ Default values of input parameters.
+
+ Returns
+ -------
+ kwds : dict
+ Input parameters with correct values.
+
+ Raises
+ ------
+ ValueError :
+ If a delimiter was specified with ``sep`` (or ``delimiter``) and
+ ``delim_whitespace=True``.
+ """
+ # fix types for sep, delimiter to Union(str, Any)
+ delim_default = defaults["delimiter"]
+ kwds: dict[str, Any] = {}
+ # gh-23761
+ #
+ # When a dialect is passed, it overrides any of the overlapping
+ # parameters passed in directly. We don't want to warn if the
+ # default parameters were passed in (since it probably means
+ # that the user didn't pass them in explicitly in the first place).
+ #
+ # "delimiter" is the annoying corner case because we alias it to
+ # "sep" before doing comparison to the dialect values later on.
+ # Thus, we need a flag to indicate that we need to "override"
+ # the comparison to dialect values by checking if default values
+ # for BOTH "delimiter" and "sep" were provided.
+ if dialect is not None:
+ kwds["sep_override"] = delimiter is None and (
+ sep is lib.no_default or sep == delim_default
+ )
+
+ if delimiter and (sep is not lib.no_default):
+ raise ValueError("Specified a sep and a delimiter; you can only specify one.")
+
+ kwds["names"] = None if names is lib.no_default else names
+
+ # Alias sep -> delimiter.
+ if delimiter is None:
+ delimiter = sep
+
+ if delim_whitespace and (delimiter is not lib.no_default):
+ raise ValueError(
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+
+ if delimiter == "\n":
+ raise ValueError(
+ r"Specified \n as separator or delimiter. This forces the python engine "
+ "which does not accept a line terminator. Hence it is not allowed to use "
+ "the line terminator as separator.",
+ )
+
+ if delimiter is lib.no_default:
+ # assign default separator value
+ kwds["delimiter"] = delim_default
+ else:
+ kwds["delimiter"] = delimiter
+
+ if engine is not None:
+ kwds["engine_specified"] = True
+ else:
+ kwds["engine"] = "c"
+ kwds["engine_specified"] = False
+
+ if on_bad_lines == "error":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
+ elif on_bad_lines == "warn":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
+ elif on_bad_lines == "skip":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
+ elif callable(on_bad_lines):
+ if engine not in ["python", "pyarrow"]:
+ raise ValueError(
+ "on_bad_line can only be a callable function "
+ "if engine='python' or 'pyarrow'"
+ )
+ kwds["on_bad_lines"] = on_bad_lines
+ else:
+ raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
+
+ check_dtype_backend(dtype_backend)
+
+ kwds["dtype_backend"] = dtype_backend
+
+ return kwds
+
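+# Editor's note (illustrative sketch, not part of pandas; "data.csv" is a
+# placeholder): _refine_defaults_read aliases ``sep`` to ``delimiter`` and
+# maps ``on_bad_lines`` strings onto the BadLineHandleMethod enum, so at the
+# public API level:
+#
+#   import pandas as pd
+#   pd.read_csv("data.csv", sep=";")              # same as delimiter=";"
+#   pd.read_csv("data.csv", on_bad_lines="skip")  # drop bad rows silently
+#   pd.read_csv("data.csv", engine="python",
+#               on_bad_lines=lambda bad: None)    # callables need python/pyarrow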
+
+def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
+ """
+ Extract concrete csv dialect instance.
+
+ Returns
+ -------
+ csv.Dialect or None
+ """
+ if kwds.get("dialect") is None:
+ return None
+
+ dialect = kwds["dialect"]
+ if dialect in csv.list_dialects():
+ dialect = csv.get_dialect(dialect)
+
+ _validate_dialect(dialect)
+
+ return dialect
+
+
+MANDATORY_DIALECT_ATTRS = (
+ "delimiter",
+ "doublequote",
+ "escapechar",
+ "skipinitialspace",
+ "quotechar",
+ "quoting",
+)
+
+
+def _validate_dialect(dialect: csv.Dialect) -> None:
+ """
+ Validate csv dialect instance.
+
+ Raises
+ ------
+ ValueError
+ If incorrect dialect is provided.
+ """
+ for param in MANDATORY_DIALECT_ATTRS:
+ if not hasattr(dialect, param):
+ raise ValueError(f"Invalid dialect {dialect} provided")
+
+
+def _merge_with_dialect_properties(
+ dialect: csv.Dialect,
+ defaults: dict[str, Any],
+) -> dict[str, Any]:
+ """
+ Merge default kwargs in TextFileReader with dialect parameters.
+
+ Parameters
+ ----------
+ dialect : csv.Dialect
+ Concrete csv dialect. See csv.Dialect documentation for more details.
+ defaults : dict
+ Keyword arguments passed to TextFileReader.
+
+ Returns
+ -------
+ kwds : dict
+ Updated keyword arguments, merged with dialect parameters.
+ """
+ kwds = defaults.copy()
+
+ for param in MANDATORY_DIALECT_ATTRS:
+ dialect_val = getattr(dialect, param)
+
+ parser_default = parser_defaults[param]
+ provided = kwds.get(param, parser_default)
+
+ # Messages for conflicting values between the dialect
+ # instance and the actual parameters provided.
+ conflict_msgs = []
+
+ # Don't warn if the default parameter was passed in,
+ # even if it conflicts with the dialect (gh-23761).
+ if provided not in (parser_default, dialect_val):
+ msg = (
+ f"Conflicting values for '{param}': '{provided}' was "
+ f"provided, but the dialect specifies '{dialect_val}'. "
+ "Using the dialect-specified value."
+ )
+
+ # Annoying corner case for not warning about
+ # conflicts between dialect and delimiter parameter.
+ # Refer to the outer "_read_" function for more info.
+ if not (param == "delimiter" and kwds.pop("sep_override", False)):
+ conflict_msgs.append(msg)
+
+ if conflict_msgs:
+ warnings.warn(
+ "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()
+ )
+ kwds[param] = dialect_val
+ return kwds
+
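+# Editor's note (illustrative sketch, not part of pandas; "data.txt" is a
+# placeholder): a registered csv dialect supplies delimiter/quoting defaults
+# that the merge above applies, warning only when the caller also passed a
+# conflicting non-default value:
+#
+#   import csv
+#   import pandas as pd
+#   csv.register_dialect("pipes", delimiter="|", quotechar="'")
+#   df = pd.read_csv("data.txt", dialect="pipes")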
+
+def _validate_skipfooter(kwds: dict[str, Any]) -> None:
+ """
+ Check whether skipfooter is compatible with other kwargs in TextFileReader.
+
+ Parameters
+ ----------
+ kwds : dict
+ Keyword arguments passed to TextFileReader.
+
+ Raises
+ ------
+ ValueError
+ If skipfooter is not compatible with other parameters.
+ """
+ if kwds.get("skipfooter"):
+ if kwds.get("iterator") or kwds.get("chunksize"):
+ raise ValueError("'skipfooter' not supported for iteration")
+ if kwds.get("nrows"):
+ raise ValueError("'skipfooter' not supported with 'nrows'")
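+
+# Editor's note (illustrative sketch, not part of pandas): because skipfooter
+# requires reading to the end of the file, combining it with iteration is
+# rejected up front, e.g. pd.read_csv("data.csv", skipfooter=1, chunksize=10)
+# is expected to raise ValueError("'skipfooter' not supported for iteration").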
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/pickle.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dae0e7106b69a471f0c2702158cfe0f11f0389c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/pickle.py
@@ -0,0 +1,210 @@
+""" pickle compat """
+from __future__ import annotations
+
+import pickle
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+import warnings
+
+from pandas.compat import pickle_compat as pc
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import get_handle
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadPickleBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas import (
+ DataFrame,
+ Series,
+ )
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
+)
+def to_pickle(
+ obj: Any,
+ filepath_or_buffer: FilePath | WriteBuffer[bytes],
+ compression: CompressionOptions = "infer",
+ protocol: int = pickle.HIGHEST_PROTOCOL,
+ storage_options: StorageOptions | None = None,
+) -> None:
+ """
+ Pickle (serialize) object to file.
+
+ Parameters
+ ----------
+ obj : any object
+ Any python object.
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function.
+        Also accepts a URL. The URL has to point to S3 or GCS.
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ protocol : int
+ Int which indicates which protocol should be used by the pickler,
+ default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
+ values for this parameter depend on the version of Python. For Python
+    2.x, possible values are 0, 1, 2. For Python >= 3.0, 3 is a valid value.
+ For Python >= 3.4, 4 is a valid value. A negative value for the
+ protocol parameter is equivalent to setting its value to
+ HIGHEST_PROTOCOL.
+
+ {storage_options}
+
+ .. [1] https://docs.python.org/3/library/pickle.html
+
+ See Also
+ --------
+ read_pickle : Load pickled pandas object (or any object) from file.
+ DataFrame.to_hdf : Write DataFrame to an HDF5 file.
+ DataFrame.to_sql : Write DataFrame to a SQL database.
+ DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
+ >>> original_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
+
+ >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
+ >>> unpickled_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ """ # noqa: E501
+ if protocol < 0:
+ protocol = pickle.HIGHEST_PROTOCOL
+
+ with get_handle(
+ filepath_or_buffer,
+ "wb",
+ compression=compression,
+ is_text=False,
+ storage_options=storage_options,
+ ) as handles:
+ # letting pickle write directly to the buffer is more memory-efficient
+ pickle.dump(obj, handles.handle, protocol=protocol)
+
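+# Editor's note (illustrative sketch, not part of pandas; file names are
+# placeholders): with the default compression="infer" the codec is chosen
+# from the file extension, so a gzip round trip needs no extra arguments:
+#
+#   import pandas as pd
+#   df = pd.DataFrame({"foo": range(5)})
+#   pd.to_pickle(df, "dummy.pkl.gz")            # gzip inferred from ".gz"
+#   restored = pd.read_pickle("dummy.pkl.gz")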
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer",
+)
+def read_pickle(
+ filepath_or_buffer: FilePath | ReadPickleBuffer,
+ compression: CompressionOptions = "infer",
+ storage_options: StorageOptions | None = None,
+) -> DataFrame | Series:
+ """
+ Load pickled pandas object (or any object) from file.
+
+ .. warning::
+
+ Loading pickled data received from untrusted sources can be
+        unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``readlines()`` function.
+        Also accepts a URL. The URL is not limited to S3 and GCS.
+
+ {decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ {storage_options}
+
+ Returns
+ -------
+ same type as object stored in file
+
+ See Also
+ --------
+ DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
+ Series.to_pickle : Pickle (serialize) Series object to file.
+ read_hdf : Read HDF5 file into a DataFrame.
+ read_sql : Read SQL query or database table into a DataFrame.
+ read_parquet : Load a parquet object, returning a DataFrame.
+
+ Notes
+ -----
+ read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3
+ provided the object was serialized with to_pickle.
+
+ Examples
+ --------
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... ) # doctest: +SKIP
+ >>> original_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
+
+ >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
+ >>> unpickled_df # doctest: +SKIP
+ foo bar
+ 0 0 5
+ 1 1 6
+ 2 2 7
+ 3 3 8
+ 4 4 9
+ """
+ excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
+ with get_handle(
+ filepath_or_buffer,
+ "rb",
+ compression=compression,
+ is_text=False,
+ storage_options=storage_options,
+ ) as handles:
+ # 1) try standard library Pickle
+ # 2) try pickle_compat (older pandas version) to handle subclass changes
+ # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
+
+        try:
+            # TypeError for Cython complaints about object.__new__ vs Tick.__new__
+            try:
+                with warnings.catch_warnings(record=True):
+                    # We want to silence any warnings about, e.g. moved modules.
+                    warnings.simplefilter("ignore", Warning)
+                    return pickle.load(handles.handle)
+            except excs_to_catch:
+                # e.g.
+                # "No module named 'pandas.core.sparse.series'"
+                # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib'"
+                return pc.load(handles.handle, encoding=None)
+        except UnicodeDecodeError:
+            # e.g. can occur for files written in py27
+            return pc.load(handles.handle, encoding="latin-1")
+
+
+def _ensure_encoding(encoding: str | None) -> str:
+    # set the encoding if we need
+    if encoding is None:
+        encoding = _default_encoding
+
+    return encoding
+
+
+def _ensure_str(name):
+ """
+    Ensure that an index / column name is a str (python 3); otherwise it
+    may be np.string dtype. Non-string dtypes are passed through unchanged.
+
+ https://github.com/pandas-dev/pandas/issues/13492
+ """
+ if isinstance(name, str):
+ name = str(name)
+ return name
+
+
+Term = PyTablesExpr
+
+
+def _ensure_term(where, scope_level: int):
+ """
+ Ensure that the where is a Term or a list of Term.
+
+ This makes sure that we are capturing the scope of variables that are
+    passed; create the terms here with a frame_level=2 (we are 2 levels down).
+ """
+ # only consider list/tuple here as an ndarray is automatically a coordinate
+ # list
+ level = scope_level + 1
+ if isinstance(where, (list, tuple)):
+ where = [
+ Term(term, scope_level=level + 1) if maybe_expression(term) else term
+ for term in where
+ if term is not None
+ ]
+ elif maybe_expression(where):
+ where = Term(where, scope_level=level)
+ return where if where is None or len(where) else None
+
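+# Editor's note (illustrative sketch, not part of pandas; assumes a key "df"
+# stored in table format): the Term/where machinery is what lets selection
+# criteria be passed as strings when reading from a store, e.g.:
+#
+#   import pandas as pd
+#   with pd.HDFStore("store.h5") as store:
+#       subset = store.select("df", where="index > 5")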
+
+incompatibility_doc: Final = """
+where criteria is being ignored as this version [%s] is too old (or
+not defined); read the file in and write it out to a new file to upgrade (with
+the copy_to method)
+"""
+
+attribute_conflict_doc: Final = """
+the [%s] attribute of the existing index is [%s] which conflicts with the new
+[%s], resetting the attribute to None
+"""
+
+performance_doc: Final = """
+your performance may suffer as PyTables will pickle object types that it cannot
+map directly to c-types [inferred_type->%s,key->%s] [items->%s]
+"""
+
+# formats
+_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}
+
+# axes map
+_AXES_MAP = {DataFrame: [0]}
+
+# register our configuration options
+dropna_doc: Final = """
+: boolean
+ drop ALL nan rows when appending to a table
+"""
+format_doc: Final = """
+: format
+    default format for writing; if None, then
+ put will default to 'fixed' and append will default to 'table'
+"""
+
+with config.config_prefix("io.hdf"):
+ config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
+ config.register_option(
+ "default_format",
+ None,
+ format_doc,
+ validator=config.is_one_of_factory(["fixed", "table", None]),
+ )
+
+# oh the troubles to reduce import time
+_table_mod = None
+_table_file_open_policy_is_strict = False
+
+
+def _tables():
+ global _table_mod
+ global _table_file_open_policy_is_strict
+ if _table_mod is None:
+ import tables
+
+ _table_mod = tables
+
+ # set the file open policy
+ # return the file open policy; this changes as of pytables 3.1
+ # depending on the HDF5 version
+ with suppress(AttributeError):
+ _table_file_open_policy_is_strict = (
+ tables.file._FILE_OPEN_POLICY == "strict"
+ )
+
+ return _table_mod
+
+
+# interface to/from ###
+
+
+def to_hdf(
+ path_or_buf: FilePath | HDFStore,
+ key: str,
+ value: DataFrame | Series,
+ mode: str = "a",
+ complevel: int | None = None,
+ complib: str | None = None,
+ append: bool = False,
+ format: str | None = None,
+ index: bool = True,
+ min_itemsize: int | dict[str, int] | None = None,
+ nan_rep=None,
+ dropna: bool | None = None,
+ data_columns: Literal[True] | list[str] | None = None,
+ errors: str = "strict",
+ encoding: str = "UTF-8",
+) -> None:
+ """store this object, close it if we opened it"""
+ if append:
+ f = lambda store: store.append(
+ key,
+ value,
+ format=format,
+ index=index,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ dropna=dropna,
+ data_columns=data_columns,
+ errors=errors,
+ encoding=encoding,
+ )
+ else:
+ # NB: dropna is not passed to `put`
+ f = lambda store: store.put(
+ key,
+ value,
+ format=format,
+ index=index,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ data_columns=data_columns,
+ errors=errors,
+ encoding=encoding,
+ dropna=dropna,
+ )
+
+ path_or_buf = stringify_path(path_or_buf)
+ if isinstance(path_or_buf, str):
+ with HDFStore(
+ path_or_buf, mode=mode, complevel=complevel, complib=complib
+ ) as store:
+ f(store)
+ else:
+ f(path_or_buf)
+
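+# Editor's note (illustrative sketch, not part of pandas; "store.h5" is a
+# placeholder): to_hdf dispatches to HDFStore.append when append=True and to
+# HDFStore.put otherwise; appending requires the queryable "table" format:
+#
+#   import pandas as pd
+#   df = pd.DataFrame({"x": range(3)})
+#   df.to_hdf("store.h5", key="data", format="table")
+#   df.to_hdf("store.h5", key="data", format="table", append=True)
+#   combined = pd.read_hdf("store.h5", "data")   # six rows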
+
+def read_hdf(
+ path_or_buf: FilePath | HDFStore,
+ key=None,
+ mode: str = "r",
+ errors: str = "strict",
+ where: str | list | None = None,
+ start: int | None = None,
+ stop: int | None = None,
+ columns: list[str] | None = None,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ **kwargs,
+):
+ """
+ Read from the store, close it if we opened it.
+
+ Retrieve pandas object stored in file, optionally based on where
+ criteria.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+ path_or_buf : str, path object, pandas.HDFStore
+        Any valid string path is acceptable. Only the local file system is
+        supported; remote URLs and file-like objects are not.
+
+ If you want to pass in a path object, pandas accepts any
+ ``os.PathLike``.
+
+ Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
+
+ key : object, optional
+ The group identifier in the store. Can be omitted if the HDF file
+ contains a single pandas object.
+ mode : {'r', 'r+', 'a'}, default 'r'
+ Mode to use when opening the file. Ignored if path_or_buf is a
+ :class:`pandas.HDFStore`. Default is 'r'.
+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled.
+ See the errors argument for :func:`open` for a full list
+ of options.
+ where : list, optional
+ A list of Term (or convertible) objects.
+ start : int, optional
+ Row number to start selection.
+ stop : int, optional
+ Row number to stop selection.
+ columns : list, optional
+ A list of columns names to return.
+ iterator : bool, optional
+ Return an iterator object.
+ chunksize : int, optional
+ Number of rows to include in an iteration when using an iterator.
+ **kwargs
+ Additional keyword arguments passed to HDFStore.
+
+ Returns
+ -------
+ object
+ The selected object. Return type depends on the object stored.
+
+ See Also
+ --------
+ DataFrame.to_hdf : Write a HDF file from a DataFrame.
+ HDFStore : Low-level access to HDF files.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP
+ >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP
+ >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP
+ """
+ if mode not in ["r", "r+", "a"]:
+ raise ValueError(
+ f"mode {mode} is not allowed while performing a read. "
+ f"Allowed modes are r, r+ and a."
+ )
+ # grab the scope
+ if where is not None:
+ where = _ensure_term(where, scope_level=1)
+
+ if isinstance(path_or_buf, HDFStore):
+ if not path_or_buf.is_open:
+ raise OSError("The HDFStore must be open for reading.")
+
+ store = path_or_buf
+ auto_close = False
+ else:
+ path_or_buf = stringify_path(path_or_buf)
+ if not isinstance(path_or_buf, str):
+ raise NotImplementedError(
+ "Support for generic buffers has not been implemented."
+ )
+ try:
+ exists = os.path.exists(path_or_buf)
+
+ # if filepath is too long
+ except (TypeError, ValueError):
+ exists = False
+
+ if not exists:
+ raise FileNotFoundError(f"File {path_or_buf} does not exist")
+
+ store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
+ # can't auto open/close if we are using an iterator
+ # so delegate to the iterator
+ auto_close = True
+
+ try:
+ if key is None:
+ groups = store.groups()
+ if len(groups) == 0:
+ raise ValueError(
+ "Dataset(s) incompatible with Pandas data types, "
+ "not table, or no datasets found in HDF5 file."
+ )
+ candidate_only_group = groups[0]
+
+ # For the HDF file to have only one dataset, all other groups
+ # should then be metadata groups for that candidate group. (This
+ # assumes that the groups() method enumerates parent groups
+ # before their children.)
+ for group_to_check in groups[1:]:
+ if not _is_metadata_of(group_to_check, candidate_only_group):
+ raise ValueError(
+ "key must be provided when HDF5 "
+ "file contains multiple datasets."
+ )
+ key = candidate_only_group._v_pathname
+ return store.select(
+ key,
+ where=where,
+ start=start,
+ stop=stop,
+ columns=columns,
+ iterator=iterator,
+ chunksize=chunksize,
+ auto_close=auto_close,
+ )
+ except (ValueError, TypeError, LookupError):
+ if not isinstance(path_or_buf, HDFStore):
+ # if there is an error, close the store if we opened it.
+ with suppress(AttributeError):
+ store.close()
+
+ raise
+
+
+def _is_metadata_of(group: Node, parent_group: Node) -> bool:
+ """Check if a given group is a metadata group for a given parent_group."""
+ if group._v_depth <= parent_group._v_depth:
+ return False
+
+ current = group
+ while current._v_depth > 1:
+ parent = current._v_parent
+ if parent == parent_group and current._v_name == "meta":
+ return True
+ current = current._v_parent
+ return False
+
+
+class HDFStore:
+ """
+ Dict-like IO interface for storing pandas objects in PyTables.
+
+ Either Fixed or Table format.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+ path : str
+ File path to HDF5 file.
+ mode : {'a', 'w', 'r', 'r+'}, default 'a'
+
+ ``'r'``
+ Read-only; no data can be modified.
+ ``'w'``
+ Write; a new file is created (an existing file with the same
+ name would be deleted).
+ ``'a'``
+ Append; an existing file is opened for reading and writing,
+ and if the file does not exist it is created.
+ ``'r+'``
+ It is similar to ``'a'``, but the file must already exist.
+ complevel : int, 0-9, default None
+ Specifies a compression level for data.
+ A value of 0 or None disables compression.
+ complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
+ Specifies the compression library to be used.
+ These additional compressors for Blosc are supported
+ (default if no compressor specified: 'blosc:blosclz'):
+ {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
+ 'blosc:zlib', 'blosc:zstd'}.
+        Specifying a compression library which is not available raises
+ a ValueError.
+ fletcher32 : bool, default False
+ If applying compression use the fletcher32 checksum.
+ **kwargs
+ These parameters will be passed to the PyTables open_file method.
+
+ Examples
+ --------
+ >>> bar = pd.DataFrame(np.random.randn(10, 4))
+ >>> store = pd.HDFStore('test.h5')
+ >>> store['foo'] = bar # write to HDF5
+ >>> bar = store['foo'] # retrieve
+ >>> store.close()
+
+ **Create or load HDF5 file in-memory**
+
+ When passing the `driver` option to the PyTables open_file method through
+ **kwargs, the HDF5 file is loaded or created in-memory and will only be
+ written when closed:
+
+ >>> bar = pd.DataFrame(np.random.randn(10, 4))
+ >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
+ >>> store['foo'] = bar
+ >>> store.close() # only now, data is written to disk
+ """
+
+ _handle: File | None
+ _mode: str
+
+ def __init__(
+ self,
+ path,
+ mode: str = "a",
+ complevel: int | None = None,
+ complib=None,
+ fletcher32: bool = False,
+ **kwargs,
+ ) -> None:
+ if "format" in kwargs:
+ raise ValueError("format is not a defined argument for HDFStore")
+
+ tables = import_optional_dependency("tables")
+
+ if complib is not None and complib not in tables.filters.all_complibs:
+ raise ValueError(
+ f"complib only supports {tables.filters.all_complibs} compression."
+ )
+
+ if complib is None and complevel is not None:
+ complib = tables.filters.default_complib
+
+ self._path = stringify_path(path)
+ if mode is None:
+ mode = "a"
+ self._mode = mode
+ self._handle = None
+ self._complevel = complevel if complevel else 0
+ self._complib = complib
+ self._fletcher32 = fletcher32
+ self._filters = None
+ self.open(mode=mode, **kwargs)
+
+ def __fspath__(self) -> str:
+ return self._path
+
+ @property
+ def root(self):
+ """return the root node"""
+ self._check_if_open()
+ assert self._handle is not None # for mypy
+ return self._handle.root
+
+ @property
+ def filename(self) -> str:
+ return self._path
+
+ def __getitem__(self, key: str):
+ return self.get(key)
+
+ def __setitem__(self, key: str, value) -> None:
+ self.put(key, value)
+
+ def __delitem__(self, key: str) -> None:
+ return self.remove(key)
+
+ def __getattr__(self, name: str):
+ """allow attribute access to get stores"""
+ try:
+ return self.get(name)
+ except (KeyError, ClosedFileError):
+ pass
+ raise AttributeError(
+ f"'{type(self).__name__}' object has no attribute '{name}'"
+ )
+
+ def __contains__(self, key: str) -> bool:
+ """
+ check for existence of this key
+        can match the exact pathname or the pathname w/o the leading '/'
+ """
+ node = self.get_node(key)
+ if node is not None:
+ name = node._v_pathname
+ if key in (name, name[1:]):
+ return True
+ return False
+
+ def __len__(self) -> int:
+ return len(self.groups())
+
+ def __repr__(self) -> str:
+ pstr = pprint_thing(self._path)
+ return f"{type(self)}\nFile path: {pstr}\n"
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+ def keys(self, include: str = "pandas") -> list[str]:
+ """
+ Return a list of keys corresponding to objects stored in HDFStore.
+
+ Parameters
+ ----------
+        include : str, default 'pandas'
+            When include equals 'pandas' return pandas objects.
+            When include equals 'native' return native HDF5 Table objects.
+
+ Returns
+ -------
+ list
+ List of ABSOLUTE path-names (e.g. have the leading '/').
+
+ Raises
+ ------
+        raises ValueError if include has an illegal value
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> store.get('data') # doctest: +SKIP
+ >>> print(store.keys()) # doctest: +SKIP
+        ['/data']
+ >>> store.close() # doctest: +SKIP
+ """
+ if include == "pandas":
+ return [n._v_pathname for n in self.groups()]
+
+ elif include == "native":
+ assert self._handle is not None # mypy
+ return [
+ n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
+ ]
+ raise ValueError(
+ f"`include` should be either 'pandas' or 'native' but is '{include}'"
+ )
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self.keys())
+
+ def items(self) -> Iterator[tuple[str, list]]:
+ """
+ iterate on key->group
+ """
+ for g in self.groups():
+ yield g._v_pathname, g
+
+ def open(self, mode: str = "a", **kwargs) -> None:
+ """
+ Open the file in the specified mode
+
+ Parameters
+ ----------
+ mode : {'a', 'w', 'r', 'r+'}, default 'a'
+ See HDFStore docstring or tables.open_file for info about modes
+ **kwargs
+ These parameters will be passed to the PyTables open_file method.
+ """
+ tables = _tables()
+
+ if self._mode != mode:
+ # if we are changing a write mode to read, ok
+ if self._mode in ["a", "w"] and mode in ["r", "r+"]:
+ pass
+ elif mode in ["w"]:
+ # this would truncate, raise here
+ if self.is_open:
+ raise PossibleDataLossError(
+ f"Re-opening the file [{self._path}] with mode [{self._mode}] "
+ "will delete the current file!"
+ )
+
+ self._mode = mode
+
+ # close and reopen the handle
+ if self.is_open:
+ self.close()
+
+ if self._complevel and self._complevel > 0:
+ self._filters = _tables().Filters(
+ self._complevel, self._complib, fletcher32=self._fletcher32
+ )
+
+ if _table_file_open_policy_is_strict and self.is_open:
+ msg = (
+ "Cannot open HDF5 file, which is already opened, "
+ "even in read-only mode."
+ )
+ raise ValueError(msg)
+
+ self._handle = tables.open_file(self._path, self._mode, **kwargs)
+
+ def close(self) -> None:
+ """
+ Close the PyTables file handle
+ """
+ if self._handle is not None:
+ self._handle.close()
+ self._handle = None
+
+ @property
+ def is_open(self) -> bool:
+ """
+ return a boolean indicating whether the file is open
+ """
+ if self._handle is None:
+ return False
+ return bool(self._handle.isopen)
+
+ def flush(self, fsync: bool = False) -> None:
+ """
+ Force all buffered modifications to be written to disk.
+
+ Parameters
+ ----------
+ fsync : bool (default False)
+ call ``os.fsync()`` on the file handle to force writing to disk.
+
+ Notes
+ -----
+ Without ``fsync=True``, flushing may not guarantee that the OS writes
+ to disk. With fsync, the operation will block until the OS claims the
+ file has been written; however, other caching layers may still
+ interfere.
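+
+        Examples
+        --------
+        A minimal sketch, assuming ``store`` is an open HDFStore:
+
+        >>> store.flush(fsync=True) # doctest: +SKIP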
+ """
+ if self._handle is not None:
+ self._handle.flush()
+ if fsync:
+ with suppress(OSError):
+ os.fsync(self._handle.fileno())
+
+ def get(self, key: str):
+ """
+ Retrieve pandas object stored in file.
+
+ Parameters
+ ----------
+ key : str
+
+ Returns
+ -------
+ object
+ Same type as object stored in file.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> store.get('data') # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+ """
+ with patch_pickle():
+ # GH#31167 Without this patch, pickle doesn't know how to unpickle
+ # old DateOffset objects now that they are cdef classes.
+ group = self.get_node(key)
+ if group is None:
+ raise KeyError(f"No object named {key} in the file")
+ return self._read_group(group)
+
+ def select(
+ self,
+ key: str,
+ where=None,
+ start=None,
+ stop=None,
+ columns=None,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ auto_close: bool = False,
+ ):
+ """
+ Retrieve pandas object stored in file, optionally based on where criteria.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+ key : str
+ Object being retrieved from file.
+ where : list or None
+ List of Term (or convertible) objects, optional.
+ start : int or None
+ Row number to start selection.
+ stop : int, default None
+ Row number to stop selection.
+ columns : list or None
+ A list of columns that if not None, will limit the return columns.
+        iterator : bool, default False
+            Whether to return an iterator.
+        chunksize : int or None
+            Number of rows to include in an iteration; returns an iterator.
+        auto_close : bool, default False
+            Should automatically close the store when finished.
+
+ Returns
+ -------
+ object
+ Retrieved object from file.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> store.get('data') # doctest: +SKIP
+ >>> print(store.keys()) # doctest: +SKIP
+        ['/data']
+        >>> store.select('/data') # doctest: +SKIP
+ A B
+ 0 1 2
+ 1 3 4
+        >>> store.select('/data', where='columns == A') # doctest: +SKIP
+ A
+ 0 1
+ 1 3
+ >>> store.close() # doctest: +SKIP
+ """
+ group = self.get_node(key)
+ if group is None:
+ raise KeyError(f"No object named {key} in the file")
+
+ # create the storer and axes
+ where = _ensure_term(where, scope_level=1)
+ s = self._create_storer(group)
+ s.infer_axes()
+
+ # function to call on iteration
+ def func(_start, _stop, _where):
+ return s.read(start=_start, stop=_stop, where=_where, columns=columns)
+
+ # create the iterator
+ it = TableIterator(
+ self,
+ s,
+ func,
+ where=where,
+ nrows=s.nrows,
+ start=start,
+ stop=stop,
+ iterator=iterator,
+ chunksize=chunksize,
+ auto_close=auto_close,
+ )
+
+ return it.get_result()
+
+ def select_as_coordinates(
+ self,
+ key: str,
+ where=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ """
+ return the selection as an Index
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+ key : str
+ where : list of Term (or convertible) objects, optional
+ start : integer (defaults to None), row number to start selection
+ stop : integer (defaults to None), row number to stop selection
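+
+        Examples
+        --------
+        A minimal sketch, assuming a table-format node named 'df' already
+        exists in this store:
+
+        >>> c = store.select_as_coordinates('df', 'index > 5') # doctest: +SKIP
+        >>> store.select('df', where=c) # doctest: +SKIP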
+ """
+ where = _ensure_term(where, scope_level=1)
+ tbl = self.get_storer(key)
+ if not isinstance(tbl, Table):
+ raise TypeError("can only read_coordinates with a table")
+ return tbl.read_coordinates(where=where, start=start, stop=stop)
+
+ def select_column(
+ self,
+ key: str,
+ column: str,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ """
+ return a single column from the table. This is generally only useful to
+ select an indexable
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+ key : str
+ column : str
+ The column of interest.
+ start : int or None, default None
+ stop : int or None, default None
+
+ Raises
+ ------
+ raises KeyError if the column is not found (or key is not a valid
+ store)
+ raises ValueError if the column can not be extracted individually (it
+ is part of a data block)
+
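+        Examples
+        --------
+        A minimal sketch, assuming a table-format node named 'df' with a
+        data column 'A' already exists in this store:
+
+        >>> store.select_column('df', 'index') # doctest: +SKIP
+        >>> store.select_column('df', 'A') # doctest: +SKIP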
+ """
+ tbl = self.get_storer(key)
+ if not isinstance(tbl, Table):
+ raise TypeError("can only read_column with a table")
+ return tbl.read_column(column=column, start=start, stop=stop)
+
+ def select_as_multiple(
+ self,
+ keys,
+ where=None,
+ selector=None,
+ columns=None,
+ start=None,
+ stop=None,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ auto_close: bool = False,
+ ):
+ """
+ Retrieve pandas objects from multiple tables.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+ Parameters
+ ----------
+        keys : a list of the tables
+        where : list of Term (or convertible) objects, optional
+        selector : the table to apply the where criteria (defaults to keys[0]
+            if not supplied)
+        columns : the columns to return
+ start : integer (defaults to None), row number to start selection
+ stop : integer (defaults to None), row number to stop selection
+ iterator : bool, return an iterator, default False
+ chunksize : nrows to include in iteration, return an iterator
+ auto_close : bool, default False
+ Should automatically close the store when finished.
+
+ Raises
+ ------
+ raises KeyError if keys or selector is not found or keys is empty
+ raises TypeError if keys is not a list or tuple
+ raises ValueError if the tables are not ALL THE SAME DIMENSIONS
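+
+        Examples
+        --------
+        A minimal sketch, assuming table-format nodes 'df1' and 'df2' (with
+        matching row counts and a data column 'A' on 'df1') already exist:
+
+        >>> store.select_as_multiple(
+        ...     ['df1', 'df2'], where=['A > 0'], selector='df1'
+        ... ) # doctest: +SKIP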
+ """
+ # default to single select
+ where = _ensure_term(where, scope_level=1)
+ if isinstance(keys, (list, tuple)) and len(keys) == 1:
+ keys = keys[0]
+ if isinstance(keys, str):
+ return self.select(
+ key=keys,
+ where=where,
+ columns=columns,
+ start=start,
+ stop=stop,
+ iterator=iterator,
+ chunksize=chunksize,
+ auto_close=auto_close,
+ )
+
+ if not isinstance(keys, (list, tuple)):
+ raise TypeError("keys must be a list/tuple")
+
+ if not len(keys):
+ raise ValueError("keys must have a non-zero length")
+
+ if selector is None:
+ selector = keys[0]
+
+ # collect the tables
+ tbls = [self.get_storer(k) for k in keys]
+ s = self.get_storer(selector)
+
+ # validate rows
+ nrows = None
+ for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
+ if t is None:
+ raise KeyError(f"Invalid table [{k}]")
+ if not t.is_table:
+ raise TypeError(
+ f"object [{t.pathname}] is not a table, and cannot be used in all "
+ "select as multiple"
+ )
+
+ if nrows is None:
+ nrows = t.nrows
+ elif t.nrows != nrows:
+ raise ValueError("all tables must have exactly the same nrows!")
+
+ # The isinstance checks here are redundant with the check above,
+ # but necessary for mypy; see GH#29757
+ _tbls = [x for x in tbls if isinstance(x, Table)]
+
+        # axis is the concatenation axis
+ axis = {t.non_index_axes[0][0] for t in _tbls}.pop()
+
+ def func(_start, _stop, _where):
+ # retrieve the objs, _where is always passed as a set of
+ # coordinates here
+ objs = [
+ t.read(where=_where, columns=columns, start=_start, stop=_stop)
+ for t in tbls
+ ]
+
+ # concat and return
+ return concat(objs, axis=axis, verify_integrity=False)._consolidate()
+
+ # create the iterator
+ it = TableIterator(
+ self,
+ s,
+ func,
+ where=where,
+ nrows=nrows,
+ start=start,
+ stop=stop,
+ iterator=iterator,
+ chunksize=chunksize,
+ auto_close=auto_close,
+ )
+
+ return it.get_result(coordinates=True)
+
+ def put(
+ self,
+ key: str,
+ value: DataFrame | Series,
+ format=None,
+ index: bool = True,
+ append: bool = False,
+ complib=None,
+ complevel: int | None = None,
+ min_itemsize: int | dict[str, int] | None = None,
+ nan_rep=None,
+ data_columns: Literal[True] | list[str] | None = None,
+ encoding=None,
+ errors: str = "strict",
+ track_times: bool = True,
+ dropna: bool = False,
+ ) -> None:
+ """
+ Store object in HDFStore.
+
+ Parameters
+ ----------
+ key : str
+ value : {Series, DataFrame}
+ format : 'fixed(f)|table(t)', default is 'fixed'
+ Format to use when storing object in HDFStore. Value can be one of:
+
+ ``'fixed'``
+ Fixed format. Fast writing/reading. Not-appendable, nor searchable.
+ ``'table'``
+ Table format. Write as a PyTables Table structure which may perform
+ worse but allow more flexible operations like searching / selecting
+ subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
+ append : bool, default False
+ This will force Table format, append the input data to the existing.
+ data_columns : list of columns or True, default None
+ List of columns to create as data columns, or True to use all columns.
+ See `here
+ `__.
+ encoding : str, default None
+ Provide an encoding for strings.
+ track_times : bool, default True
+ Parameter is propagated to 'create_table' method of 'PyTables'.
+            If set to False it enables producing identical h5 files (same hashes)
+            independent of creation time.
+ dropna : bool, default False, optional
+ Remove missing values.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ """
+ if format is None:
+ format = get_option("io.hdf.default_format") or "fixed"
+ format = self._validate_format(format)
+ self._write_to_group(
+ key,
+ value,
+ format=format,
+ index=index,
+ append=append,
+ complib=complib,
+ complevel=complevel,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ data_columns=data_columns,
+ encoding=encoding,
+ errors=errors,
+ track_times=track_times,
+ dropna=dropna,
+ )
+
+ def remove(self, key: str, where=None, start=None, stop=None) -> None:
+ """
+ Remove pandas object partially by specifying the where condition
+
+ Parameters
+ ----------
+ key : str
+ Node to remove or delete rows from
+ where : list of Term (or convertible) objects, optional
+ start : integer (defaults to None), row number to start selection
+ stop : integer (defaults to None), row number to stop selection
+
+ Returns
+ -------
+ number of rows removed (or None if not a Table)
+
+ Raises
+ ------
+ raises KeyError if key is not a valid store
+
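+        Examples
+        --------
+        A minimal sketch, assuming a table-format node named 'df' already
+        exists in this store:
+
+        >>> store.remove('df', where='index > 5') # doctest: +SKIP
+        >>> store.remove('df') # doctest: +SKIP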
+ """
+ where = _ensure_term(where, scope_level=1)
+ try:
+ s = self.get_storer(key)
+ except KeyError:
+ # the key is not a valid store, re-raising KeyError
+ raise
+ except AssertionError:
+ # surface any assertion errors for e.g. debugging
+ raise
+ except Exception as err:
+ # In tests we get here with ClosedFileError, TypeError, and
+ # _table_mod.NoSuchNodeError. TODO: Catch only these?
+
+ if where is not None:
+ raise ValueError(
+ "trying to remove a node with a non-None where clause!"
+ ) from err
+
+ # we are actually trying to remove a node (with children)
+ node = self.get_node(key)
+ if node is not None:
+ node._f_remove(recursive=True)
+ return None
+
+ # remove the node
+ if com.all_none(where, start, stop):
+ s.group._f_remove(recursive=True)
+
+ # delete from the table
+ else:
+ if not s.is_table:
+ raise ValueError(
+ "can only remove with where on objects written as tables"
+ )
+ return s.delete(where=where, start=start, stop=stop)
+
+ def append(
+ self,
+ key: str,
+ value: DataFrame | Series,
+ format=None,
+ axes=None,
+ index: bool | list[str] = True,
+ append: bool = True,
+ complib=None,
+ complevel: int | None = None,
+ columns=None,
+ min_itemsize: int | dict[str, int] | None = None,
+ nan_rep=None,
+ chunksize: int | None = None,
+ expectedrows=None,
+ dropna: bool | None = None,
+ data_columns: Literal[True] | list[str] | None = None,
+ encoding=None,
+ errors: str = "strict",
+ ) -> None:
+ """
+ Append to Table in file.
+
+ Node must already exist and be Table format.
+
+ Parameters
+ ----------
+ key : str
+ value : {Series, DataFrame}
+ format : 'table' is the default
+ Format to use when storing object in HDFStore. Value can be one of:
+
+ ``'table'``
+ Table format. Write as a PyTables Table structure which may perform
+ worse but allow more flexible operations like searching / selecting
+ subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
+ append : bool, default True
+ Append the input data to the existing.
+ data_columns : list of columns, or True, default None
+ List of columns to create as indexed data columns for on-disk
+ queries, or True to use all columns. By default only the axes
+ of the object are indexed. See `here
+ `__.
+ min_itemsize : dict of columns that specify minimum str sizes
+ nan_rep : str to use as str nan representation
+ chunksize : size to chunk the writing
+ expectedrows : expected TOTAL row size of this table
+ encoding : default None, provide an encoding for str
+ dropna : bool, default False, optional
+            Do not write an ALL nan row to the store; settable
+ by the option 'io.hdf.dropna_table'.
+
+ Notes
+ -----
+ Does *not* check if data being appended overlaps with existing
+ data in the table, so be careful
+
+ Examples
+ --------
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df1, format='table') # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
+ >>> store.append('data', df2) # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+ A B
+ 0 1 2
+ 1 3 4
+ 0 5 6
+ 1 7 8
+ """
+ if columns is not None:
+ raise TypeError(
+ "columns is not a supported keyword in append, try data_columns"
+ )
+
+ if dropna is None:
+ dropna = get_option("io.hdf.dropna_table")
+ if format is None:
+ format = get_option("io.hdf.default_format") or "table"
+ format = self._validate_format(format)
+ self._write_to_group(
+ key,
+ value,
+ format=format,
+ axes=axes,
+ index=index,
+ append=append,
+ complib=complib,
+ complevel=complevel,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ chunksize=chunksize,
+ expectedrows=expectedrows,
+ dropna=dropna,
+ data_columns=data_columns,
+ encoding=encoding,
+ errors=errors,
+ )
+
+ def append_to_multiple(
+ self,
+ d: dict,
+ value,
+ selector,
+ data_columns=None,
+ axes=None,
+ dropna: bool = False,
+ **kwargs,
+ ) -> None:
+ """
+ Append to multiple tables
+
+ Parameters
+ ----------
+ d : a dict of table_name to table_columns, None is acceptable as the
+ values of one node (this will get all the remaining columns)
+ value : a pandas object
+ selector : a string that designates the indexable table; all of its
+            columns will be designated as data_columns, unless data_columns is
+ passed, in which case these are used
+ data_columns : list of columns to create as data columns, or True to
+ use all columns
+        dropna : if True, drop rows from all tables if any single
+ row in each table has all NaN. Default False.
+
+ Notes
+ -----
+ axes parameter is currently not accepted
+
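+        Examples
+        --------
+        A minimal sketch, splitting the columns of ``df`` across two table
+        nodes (all names here are illustrative):
+
+        >>> store.append_to_multiple(
+        ...     {'df1': ['A', 'B'], 'df2': None}, df, selector='df1'
+        ... ) # doctest: +SKIP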
+ """
+ if axes is not None:
+ raise TypeError(
+ "axes is currently not accepted as a parameter to append_to_multiple; "
+ "you can create the tables independently instead"
+ )
+
+ if not isinstance(d, dict):
+ raise ValueError(
+ "append_to_multiple must have a dictionary specified as the "
+ "way to split the value"
+ )
+
+ if selector not in d:
+ raise ValueError(
+ "append_to_multiple requires a selector that is in passed dict"
+ )
+
+ # figure out the splitting axis (the non_index_axis)
+ axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)])))
+
+ # figure out how to split the value
+ remain_key = None
+ remain_values: list = []
+ for k, v in d.items():
+ if v is None:
+ if remain_key is not None:
+ raise ValueError(
+ "append_to_multiple can only have one value in d that is None"
+ )
+ remain_key = k
+ else:
+ remain_values.extend(v)
+ if remain_key is not None:
+ ordered = value.axes[axis]
+ ordd = ordered.difference(Index(remain_values))
+ ordd = sorted(ordered.get_indexer(ordd))
+ d[remain_key] = ordered.take(ordd)
+
+ # data_columns
+ if data_columns is None:
+ data_columns = d[selector]
+
+ # ensure rows are synchronized across the tables
+ if dropna:
+ idxs = (value[cols].dropna(how="all").index for cols in d.values())
+ valid_index = next(idxs)
+ for index in idxs:
+ valid_index = valid_index.intersection(index)
+ value = value.loc[valid_index]
+
+ min_itemsize = kwargs.pop("min_itemsize", None)
+
+ # append
+ for k, v in d.items():
+ dc = data_columns if k == selector else None
+
+ # compute the val
+ val = value.reindex(v, axis=axis)
+
+ filtered = (
+ {key: value for (key, value) in min_itemsize.items() if key in v}
+ if min_itemsize is not None
+ else None
+ )
+ self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
+
+ def create_table_index(
+ self,
+ key: str,
+ columns=None,
+ optlevel: int | None = None,
+ kind: str | None = None,
+ ) -> None:
+ """
+ Create a pytables index on the table.
+
+ Parameters
+ ----------
+ key : str
+ columns : None, bool, or listlike[str]
+ Indicate which columns to create an index on.
+
+ * False : Do not create any indexes.
+ * True : Create indexes on all columns.
+ * None : Create indexes on all columns.
+ * listlike : Create indexes on the given columns.
+
+ optlevel : int or None, default None
+ Optimization level, if None, pytables defaults to 6.
+ kind : str or None, default None
+ Kind of index, if None, pytables defaults to "medium".
+
+ Raises
+ ------
+ TypeError: raises if the node is not a table
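+
+        Examples
+        --------
+        A minimal sketch, assuming 'df' was appended with ``index=False`` and
+        has a data column 'B':
+
+        >>> store.create_table_index(
+        ...     'df', columns=['B'], optlevel=9, kind='full'
+        ... ) # doctest: +SKIP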
+ """
+ # version requirements
+ _tables()
+ s = self.get_storer(key)
+ if s is None:
+ return
+
+ if not isinstance(s, Table):
+ raise TypeError("cannot create table index on a Fixed format store")
+ s.create_index(columns=columns, optlevel=optlevel, kind=kind)
+
+ def groups(self) -> list:
+ """
+ Return a list of all the top-level nodes.
+
+ Each node returned is not a pandas storage object.
+
+ Returns
+ -------
+ list
+ List of objects.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> print(store.groups()) # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+ [/data (Group) ''
+ children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
+ 'block0_items' (Array)]]
+ """
+ _tables()
+ self._check_if_open()
+ assert self._handle is not None # for mypy
+ assert _table_mod is not None # for mypy
+ return [
+ g
+ for g in self._handle.walk_groups()
+ if (
+ not isinstance(g, _table_mod.link.Link)
+ and (
+ getattr(g._v_attrs, "pandas_type", None)
+ or getattr(g, "table", None)
+ or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
+ )
+ )
+ ]
+
+ def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
+ """
+ Walk the pytables group hierarchy for pandas objects.
+
+ This generator will yield the group path, subgroups and pandas object
+ names for each group.
+
+ Any non-pandas PyTables objects that are not a group will be ignored.
+
+ The `where` group itself is listed first (preorder), then each of its
+ child groups (following an alphanumerical order) is also traversed,
+ following the same procedure.
+
+ Parameters
+ ----------
+ where : str, default "/"
+ Group where to start walking.
+
+ Yields
+ ------
+ path : str
+ Full path to a group (without trailing '/').
+ groups : list
+ Names (strings) of the groups contained in `path`.
+ leaves : list
+ Names (strings) of the pandas objects contained in `path`.
+
+ Examples
+ --------
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df1, format='table') # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
+        >>> store.append('data', df2) # doctest: +SKIP
+        >>> for group in store.walk(): # doctest: +SKIP
+        ...     print(group) # doctest: +SKIP
+        >>> store.close() # doctest: +SKIP
+ """
+ _tables()
+ self._check_if_open()
+ assert self._handle is not None # for mypy
+ assert _table_mod is not None # for mypy
+
+ for g in self._handle.walk_groups(where):
+ if getattr(g._v_attrs, "pandas_type", None) is not None:
+ continue
+
+ groups = []
+ leaves = []
+ for child in g._v_children.values():
+ pandas_type = getattr(child._v_attrs, "pandas_type", None)
+ if pandas_type is None:
+ if isinstance(child, _table_mod.group.Group):
+ groups.append(child._v_name)
+ else:
+ leaves.append(child._v_name)
+
+ yield (g._v_pathname.rstrip("/"), groups, leaves)
+
+ def get_node(self, key: str) -> Node | None:
+ """return the node with the key or None if it does not exist"""
+ self._check_if_open()
+ if not key.startswith("/"):
+ key = "/" + key
+
+ assert self._handle is not None
+ assert _table_mod is not None # for mypy
+ try:
+ node = self._handle.get_node(self.root, key)
+ except _table_mod.exceptions.NoSuchNodeError:
+ return None
+
+ assert isinstance(node, _table_mod.Node), type(node)
+ return node
+
+ def get_storer(self, key: str) -> GenericFixed | Table:
+ """return the storer object for a key, raise if not in the file"""
+ group = self.get_node(key)
+ if group is None:
+ raise KeyError(f"No object named {key} in the file")
+
+ s = self._create_storer(group)
+ s.infer_axes()
+ return s
+
+ def copy(
+ self,
+ file,
+ mode: str = "w",
+ propindexes: bool = True,
+ keys=None,
+ complib=None,
+ complevel: int | None = None,
+ fletcher32: bool = False,
+ overwrite: bool = True,
+ ) -> HDFStore:
+ """
+        Copy the existing store to a new file.
+
+ Parameters
+ ----------
+ propindexes : bool, default True
+ Restore indexes in copied file.
+ keys : list, optional
+ List of keys to include in the copy (defaults to all).
+ overwrite : bool, default True
+ Whether to overwrite (remove and replace) existing nodes in the new store.
+ mode, complib, complevel, fletcher32 same as in HDFStore.__init__
+
+ Returns
+ -------
+ open file handle of the new store
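+
+        Examples
+        --------
+        A minimal sketch (the target file name is illustrative):
+
+        >>> new_store = store.copy('copy.h5') # doctest: +SKIP
+        >>> new_store.close() # doctest: +SKIP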
+ """
+ new_store = HDFStore(
+ file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
+ )
+ if keys is None:
+ keys = list(self.keys())
+ if not isinstance(keys, (tuple, list)):
+ keys = [keys]
+ for k in keys:
+ s = self.get_storer(k)
+ if s is not None:
+ if k in new_store:
+ if overwrite:
+ new_store.remove(k)
+
+ data = self.select(k)
+ if isinstance(s, Table):
+ index: bool | list[str] = False
+ if propindexes:
+ index = [a.name for a in s.axes if a.is_indexed]
+ new_store.append(
+ k,
+ data,
+ index=index,
+ data_columns=getattr(s, "data_columns", None),
+ encoding=s.encoding,
+ )
+ else:
+ new_store.put(k, data, encoding=s.encoding)
+
+ return new_store
+
+ def info(self) -> str:
+ """
+ Print detailed information on the store.
+
+ Returns
+ -------
+ str
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
+ >>> store.put('data', df) # doctest: +SKIP
+ >>> print(store.info()) # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+
+ File path: store.h5
+ /data frame (shape->[2,2])
+ """
+ path = pprint_thing(self._path)
+ output = f"{type(self)}\nFile path: {path}\n"
+
+ if self.is_open:
+ lkeys = sorted(self.keys())
+ if len(lkeys):
+ keys = []
+ values = []
+
+ for k in lkeys:
+ try:
+ s = self.get_storer(k)
+ if s is not None:
+ keys.append(pprint_thing(s.pathname or k))
+ values.append(pprint_thing(s or "invalid_HDFStore node"))
+ except AssertionError:
+ # surface any assertion errors for e.g. debugging
+ raise
+ except Exception as detail:
+ keys.append(k)
+ dstr = pprint_thing(detail)
+ values.append(f"[invalid_HDFStore node: {dstr}]")
+
+ output += adjoin(12, keys, values)
+ else:
+ output += "Empty"
+ else:
+ output += "File is CLOSED"
+
+ return output
+
+ # ------------------------------------------------------------------------
+ # private methods
+
+ def _check_if_open(self) -> None:
+ if not self.is_open:
+ raise ClosedFileError(f"{self._path} file is not open!")
+
+ def _validate_format(self, format: str) -> str:
+ """validate / deprecate formats"""
+ # validate
+ try:
+ format = _FORMAT_MAP[format.lower()]
+ except KeyError as err:
+ raise TypeError(f"invalid HDFStore format specified [{format}]") from err
+
+ return format
+
+ def _create_storer(
+ self,
+ group,
+ format=None,
+ value: DataFrame | Series | None = None,
+ encoding: str = "UTF-8",
+ errors: str = "strict",
+ ) -> GenericFixed | Table:
+ """return a suitable class to operate"""
+ cls: type[GenericFixed | Table]
+
+ if value is not None and not isinstance(value, (Series, DataFrame)):
+ raise TypeError("value must be None, Series, or DataFrame")
+
+ pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
+ tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
+
+ # infer the pt from the passed value
+ if pt is None:
+ if value is None:
+ _tables()
+ assert _table_mod is not None # for mypy
+ if getattr(group, "table", None) or isinstance(
+ group, _table_mod.table.Table
+ ):
+ pt = "frame_table"
+ tt = "generic_table"
+ else:
+ raise TypeError(
+ "cannot create a storer if the object is not existing "
+ "nor a value are passed"
+ )
+ else:
+ if isinstance(value, Series):
+ pt = "series"
+ else:
+ pt = "frame"
+
+ # we are actually a table
+ if format == "table":
+ pt += "_table"
+
+ # a storer node
+ if "table" not in pt:
+ _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
+ try:
+ cls = _STORER_MAP[pt]
+ except KeyError as err:
+ raise TypeError(
+ f"cannot properly create the storer for: [_STORER_MAP] [group->"
+ f"{group},value->{type(value)},format->{format}"
+ ) from err
+ return cls(self, group, encoding=encoding, errors=errors)
+
+ # existing node (and must be a table)
+ if tt is None:
+ # if we are a writer, determine the tt
+ if value is not None:
+ if pt == "series_table":
+ index = getattr(value, "index", None)
+ if index is not None:
+ if index.nlevels == 1:
+ tt = "appendable_series"
+ elif index.nlevels > 1:
+ tt = "appendable_multiseries"
+ elif pt == "frame_table":
+ index = getattr(value, "index", None)
+ if index is not None:
+ if index.nlevels == 1:
+ tt = "appendable_frame"
+ elif index.nlevels > 1:
+ tt = "appendable_multiframe"
+
+ _TABLE_MAP = {
+ "generic_table": GenericTable,
+ "appendable_series": AppendableSeriesTable,
+ "appendable_multiseries": AppendableMultiSeriesTable,
+ "appendable_frame": AppendableFrameTable,
+ "appendable_multiframe": AppendableMultiFrameTable,
+ "worm": WORMTable,
+ }
+ try:
+ cls = _TABLE_MAP[tt]
+ except KeyError as err:
+ raise TypeError(
+ f"cannot properly create the storer for: [_TABLE_MAP] [group->"
+ f"{group},value->{type(value)},format->{format}"
+ ) from err
+
+ return cls(self, group, encoding=encoding, errors=errors)
+
+ def _write_to_group(
+ self,
+ key: str,
+ value: DataFrame | Series,
+ format,
+ axes=None,
+ index: bool | list[str] = True,
+ append: bool = False,
+ complib=None,
+ complevel: int | None = None,
+ fletcher32=None,
+ min_itemsize: int | dict[str, int] | None = None,
+ chunksize: int | None = None,
+ expectedrows=None,
+ dropna: bool = False,
+ nan_rep=None,
+ data_columns=None,
+ encoding=None,
+ errors: str = "strict",
+ track_times: bool = True,
+ ) -> None:
+ # we don't want to store a table node at all if our object is 0-len
+        # as there are no dtypes
+ if getattr(value, "empty", None) and (format == "table" or append):
+ return
+
+ group = self._identify_group(key, append)
+
+ s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
+ if append:
+ # raise if we are trying to append to a Fixed format,
+ # or a table that exists (and we are putting)
+ if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
+ raise ValueError("Can only append to Tables")
+ if not s.is_exists:
+ s.set_object_info()
+ else:
+ s.set_object_info()
+
+ if not s.is_table and complib:
+ raise ValueError("Compression not supported on Fixed format stores")
+
+ # write the object
+ s.write(
+ obj=value,
+ axes=axes,
+ append=append,
+ complib=complib,
+ complevel=complevel,
+ fletcher32=fletcher32,
+ min_itemsize=min_itemsize,
+ chunksize=chunksize,
+ expectedrows=expectedrows,
+ dropna=dropna,
+ nan_rep=nan_rep,
+ data_columns=data_columns,
+ track_times=track_times,
+ )
+
+ if isinstance(s, Table) and index:
+ s.create_index(columns=index)
+
+ def _read_group(self, group: Node):
+ s = self._create_storer(group)
+ s.infer_axes()
+ return s.read()
+
+ def _identify_group(self, key: str, append: bool) -> Node:
+ """Identify HDF5 group based on key, delete/create group if needed."""
+ group = self.get_node(key)
+
+ # we make this assertion for mypy; the get_node call will already
+ # have raised if this is incorrect
+ assert self._handle is not None
+
+ # remove the node if we are not appending
+ if group is not None and not append:
+ self._handle.remove_node(group, recursive=True)
+ group = None
+
+ if group is None:
+ group = self._create_nodes_and_group(key)
+
+ return group
+
+ def _create_nodes_and_group(self, key: str) -> Node:
+ """Create nodes from key and return group name."""
+ # assertion for mypy
+ assert self._handle is not None
+
+ paths = key.split("/")
+ # recursively create the groups
+ path = "/"
+ for p in paths:
+ if not len(p):
+ continue
+ new_path = path
+ if not path.endswith("/"):
+ new_path += "/"
+ new_path += p
+ group = self.get_node(new_path)
+ if group is None:
+ group = self._handle.create_group(path, p)
+ path = new_path
+ return group
+
+
+class TableIterator:
+ """
+ Define the iteration interface on a table
+
+ Parameters
+ ----------
+ store : HDFStore
+ s : the referred storer
+ func : the function to execute the query
+ where : the where of the query
+ nrows : the rows to iterate on
+ start : the passed start value (default is None)
+ stop : the passed stop value (default is None)
+ iterator : bool, default False
+ Whether to use the default iterator.
+ chunksize : the passed chunking value (default is 100000)
+ auto_close : bool, default False
+ Whether to automatically close the store at the end of iteration.
+ """
+
+ chunksize: int | None
+ store: HDFStore
+ s: GenericFixed | Table
+
+ def __init__(
+ self,
+ store: HDFStore,
+ s: GenericFixed | Table,
+ func,
+ where,
+ nrows,
+ start=None,
+ stop=None,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ auto_close: bool = False,
+ ) -> None:
+ self.store = store
+ self.s = s
+ self.func = func
+ self.where = where
+
+        # set start/stop if they are not set and we are a table
+ if self.s.is_table:
+ if nrows is None:
+ nrows = 0
+ if start is None:
+ start = 0
+ if stop is None:
+ stop = nrows
+ stop = min(nrows, stop)
+
+ self.nrows = nrows
+ self.start = start
+ self.stop = stop
+
+ self.coordinates = None
+ if iterator or chunksize is not None:
+ if chunksize is None:
+ chunksize = 100000
+ self.chunksize = int(chunksize)
+ else:
+ self.chunksize = None
+
+ self.auto_close = auto_close
+
+ def __iter__(self) -> Iterator:
+ # iterate
+ current = self.start
+ if self.coordinates is None:
+ raise ValueError("Cannot iterate until get_result is called.")
+ while current < self.stop:
+ stop = min(current + self.chunksize, self.stop)
+ value = self.func(None, None, self.coordinates[current:stop])
+ current = stop
+ if value is None or not len(value):
+ continue
+
+ yield value
+
+ self.close()
+
+ def close(self) -> None:
+ if self.auto_close:
+ self.store.close()
+
+ def get_result(self, coordinates: bool = False):
+ # return the actual iterator
+ if self.chunksize is not None:
+ if not isinstance(self.s, Table):
+ raise TypeError("can only use an iterator or chunksize on a table")
+
+ self.coordinates = self.s.read_coordinates(where=self.where)
+
+ return self
+
+        # if specified, read via coordinates (necessary for multiple selections)
+ if coordinates:
+ if not isinstance(self.s, Table):
+ raise TypeError("can only read_coordinates on a table")
+ where = self.s.read_coordinates(
+ where=self.where, start=self.start, stop=self.stop
+ )
+ else:
+ where = self.where
+
+ # directly return the result
+ results = self.func(self.start, self.stop, where)
+ self.close()
+ return results
+
+
+class IndexCol:
+ """
+ an index column description class
+
+ Parameters
+ ----------
+ axis : axis which I reference
+ values : the ndarray like converted values
+ kind : a string description of this type
+ typ : the pytables type
+ pos : the position in the pytables
+
+ """
+
+ is_an_indexable: bool = True
+ is_data_indexable: bool = True
+ _info_fields = ["freq", "tz", "index_name"]
+
+ def __init__(
+ self,
+ name: str,
+ values=None,
+ kind=None,
+ typ=None,
+ cname: str | None = None,
+ axis=None,
+ pos=None,
+ freq=None,
+ tz=None,
+ index_name=None,
+ ordered=None,
+ table=None,
+ meta=None,
+ metadata=None,
+ ) -> None:
+ if not isinstance(name, str):
+ raise ValueError("`name` must be a str.")
+
+ self.values = values
+ self.kind = kind
+ self.typ = typ
+ self.name = name
+ self.cname = cname or name
+ self.axis = axis
+ self.pos = pos
+ self.freq = freq
+ self.tz = tz
+ self.index_name = index_name
+ self.ordered = ordered
+ self.table = table
+ self.meta = meta
+ self.metadata = metadata
+
+ if pos is not None:
+ self.set_pos(pos)
+
+ # These are ensured as long as the passed arguments match the
+ # constructor annotations.
+ assert isinstance(self.name, str)
+ assert isinstance(self.cname, str)
+
+ @property
+ def itemsize(self) -> int:
+ # Assumes self.typ has already been initialized
+ return self.typ.itemsize
+
+ @property
+ def kind_attr(self) -> str:
+ return f"{self.name}_kind"
+
+ def set_pos(self, pos: int) -> None:
+ """set the position of this column in the Table"""
+ self.pos = pos
+ if pos is not None and self.typ is not None:
+ self.typ._v_pos = pos
+
+ def __repr__(self) -> str:
+ temp = tuple(
+ map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
+ )
+ return ",".join(
+ [
+ f"{key}->{value}"
+ for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
+ ]
+ )
+
+ def __eq__(self, other: object) -> bool:
+ """compare 2 col items"""
+ return all(
+ getattr(self, a, None) == getattr(other, a, None)
+ for a in ["name", "cname", "axis", "pos"]
+ )
+
+ def __ne__(self, other) -> bool:
+ return not self.__eq__(other)
+
+ @property
+ def is_indexed(self) -> bool:
+ """return whether I am an indexed column"""
+ if not hasattr(self.table, "cols"):
+ # e.g. if infer hasn't been called yet, self.table will be None.
+ return False
+ return getattr(self.table.cols, self.cname).is_indexed
+
+ def convert(
+ self, values: np.ndarray, nan_rep, encoding: str, errors: str
+ ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]:
+ """
+ Convert the data from this selection to the appropriate pandas type.
+ """
+ assert isinstance(values, np.ndarray), type(values)
+
+ # values is a recarray
+ if values.dtype.fields is not None:
+ # Copy, otherwise values will be a view
+            # preventing the original recarray from being freed
+ values = values[self.cname].copy()
+
+ val_kind = _ensure_decoded(self.kind)
+ values = _maybe_convert(values, val_kind, encoding, errors)
+ kwargs = {}
+ kwargs["name"] = _ensure_decoded(self.index_name)
+
+ if self.freq is not None:
+ kwargs["freq"] = _ensure_decoded(self.freq)
+
+ factory: type[Index | DatetimeIndex] = Index
+ if lib.is_np_dtype(values.dtype, "M") or isinstance(
+ values.dtype, DatetimeTZDtype
+ ):
+ factory = DatetimeIndex
+ elif values.dtype == "i8" and "freq" in kwargs:
+ # PeriodIndex data is stored as i8
+ # error: Incompatible types in assignment (expression has type
+ # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type
+ # "Union[Type[Index], Type[DatetimeIndex]]")
+ factory = lambda x, **kwds: PeriodIndex.from_ordinals( # type: ignore[assignment]
+ x, freq=kwds.get("freq", None)
+ )._rename(
+ kwds["name"]
+ )
+
+ # making an Index instance could throw a number of different errors
+ try:
+ new_pd_index = factory(values, **kwargs)
+ except ValueError:
+            # if the output freq is different from what we recorded,
+ # it should be None (see also 'doc example part 2')
+ if "freq" in kwargs:
+ kwargs["freq"] = None
+ new_pd_index = factory(values, **kwargs)
+ final_pd_index = _set_tz(new_pd_index, self.tz)
+ return final_pd_index, final_pd_index
+
+ def take_data(self):
+ """return the values"""
+ return self.values
+
+ @property
+ def attrs(self):
+ return self.table._v_attrs
+
+ @property
+ def description(self):
+ return self.table.description
+
+ @property
+ def col(self):
+ """return my current col description"""
+ return getattr(self.description, self.cname, None)
+
+ @property
+ def cvalues(self):
+ """return my cython values"""
+ return self.values
+
+ def __iter__(self) -> Iterator:
+ return iter(self.values)
+
+ def maybe_set_size(self, min_itemsize=None) -> None:
+ """
+ maybe set a string col itemsize:
+        min_itemsize can be an integer or a dict with this column's name
+ with an integer size
+ """
+ if _ensure_decoded(self.kind) == "string":
+ if isinstance(min_itemsize, dict):
+ min_itemsize = min_itemsize.get(self.name)
+
+ if min_itemsize is not None and self.typ.itemsize < min_itemsize:
+ self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)
+
+ def validate_names(self) -> None:
+ pass
+
+ def validate_and_set(self, handler: AppendableTable, append: bool) -> None:
+ self.table = handler.table
+ self.validate_col()
+ self.validate_attr(append)
+ self.validate_metadata(handler)
+ self.write_metadata(handler)
+ self.set_attr()
+
+ def validate_col(self, itemsize=None):
+ """validate this column: return the compared against itemsize"""
+ # validate this column for string truncation (or reset to the max size)
+ if _ensure_decoded(self.kind) == "string":
+ c = self.col
+ if c is not None:
+ if itemsize is None:
+ itemsize = self.itemsize
+ if c.itemsize < itemsize:
+ raise ValueError(
+ f"Trying to store a string with len [{itemsize}] in "
+ f"[{self.cname}] column but\nthis column has a limit of "
+ f"[{c.itemsize}]!\nConsider using min_itemsize to "
+ "preset the sizes on these columns"
+ )
+ return c.itemsize
+
+ return None
+
+ def validate_attr(self, append: bool) -> None:
+ # check for backwards incompatibility
+ if append:
+ existing_kind = getattr(self.attrs, self.kind_attr, None)
+ if existing_kind is not None and existing_kind != self.kind:
+ raise TypeError(
+ f"incompatible kind in col [{existing_kind} - {self.kind}]"
+ )
+
+ def update_info(self, info) -> None:
+ """
+ set/update the info for this indexable with the key/value
+ if there is a conflict raise/warn as needed
+ """
+ for key in self._info_fields:
+ value = getattr(self, key, None)
+ idx = info.setdefault(self.name, {})
+
+ existing_value = idx.get(key)
+ if key in idx and value is not None and existing_value != value:
+ # frequency/name just warn
+ if key in ["freq", "index_name"]:
+ ws = attribute_conflict_doc % (key, existing_value, value)
+ warnings.warn(
+ ws, AttributeConflictWarning, stacklevel=find_stack_level()
+ )
+
+ # reset
+ idx[key] = None
+ setattr(self, key, None)
+
+ else:
+ raise ValueError(
+ f"invalid info for [{self.name}] for [{key}], "
+ f"existing_value [{existing_value}] conflicts with "
+ f"new value [{value}]"
+ )
+ elif value is not None or existing_value is not None:
+ idx[key] = value
+
+ def set_info(self, info) -> None:
+ """set my state from the passed info"""
+ idx = info.get(self.name)
+ if idx is not None:
+ self.__dict__.update(idx)
+
+ def set_attr(self) -> None:
+ """set the kind for this column"""
+ setattr(self.attrs, self.kind_attr, self.kind)
+
+ def validate_metadata(self, handler: AppendableTable) -> None:
+ """validate that kind=category does not change the categories"""
+ if self.meta == "category":
+ new_metadata = self.metadata
+ cur_metadata = handler.read_metadata(self.cname)
+ if (
+ new_metadata is not None
+ and cur_metadata is not None
+ and not array_equivalent(
+ new_metadata, cur_metadata, strict_nan=True, dtype_equal=True
+ )
+ ):
+ raise ValueError(
+ "cannot append a categorical with "
+ "different categories to the existing"
+ )
+
+ def write_metadata(self, handler: AppendableTable) -> None:
+ """set the meta data"""
+ if self.metadata is not None:
+ handler.write_metadata(self.cname, self.metadata)
+
+
+class GenericIndexCol(IndexCol):
+ """an index which is not represented in the data of the table"""
+
+ @property
+ def is_indexed(self) -> bool:
+ return False
+
+ def convert(
+ self, values: np.ndarray, nan_rep, encoding: str, errors: str
+ ) -> tuple[Index, Index]:
+ """
+ Convert the data from this selection to the appropriate pandas type.
+
+ Parameters
+ ----------
+ values : np.ndarray
+ nan_rep : str
+ encoding : str
+ errors : str
+ """
+ assert isinstance(values, np.ndarray), type(values)
+
+ index = RangeIndex(len(values))
+ return index, index
+
+ def set_attr(self) -> None:
+ pass
+
+
+class DataCol(IndexCol):
+ """
+ a data holding column, by definition this is not indexable
+
+ Parameters
+ ----------
+ data : the actual data
+ cname : the column name in the table to hold the data (typically
+ values)
+ meta : a string description of the metadata
+ metadata : the actual metadata
+ """
+
+ is_an_indexable = False
+ is_data_indexable = False
+ _info_fields = ["tz", "ordered"]
+
+ def __init__(
+ self,
+ name: str,
+ values=None,
+ kind=None,
+ typ=None,
+ cname: str | None = None,
+ pos=None,
+ tz=None,
+ ordered=None,
+ table=None,
+ meta=None,
+ metadata=None,
+ dtype: DtypeArg | None = None,
+ data=None,
+ ) -> None:
+ super().__init__(
+ name=name,
+ values=values,
+ kind=kind,
+ typ=typ,
+ pos=pos,
+ cname=cname,
+ tz=tz,
+ ordered=ordered,
+ table=table,
+ meta=meta,
+ metadata=metadata,
+ )
+ self.dtype = dtype
+ self.data = data
+
+ @property
+ def dtype_attr(self) -> str:
+ return f"{self.name}_dtype"
+
+ @property
+ def meta_attr(self) -> str:
+ return f"{self.name}_meta"
+
+ def __repr__(self) -> str:
+ temp = tuple(
+ map(
+ pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
+ )
+ )
+ return ",".join(
+ [
+ f"{key}->{value}"
+ for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
+ ]
+ )
+
+ def __eq__(self, other: object) -> bool:
+ """compare 2 col items"""
+ return all(
+ getattr(self, a, None) == getattr(other, a, None)
+ for a in ["name", "cname", "dtype", "pos"]
+ )
+
+ def set_data(self, data: ArrayLike) -> None:
+ assert data is not None
+ assert self.dtype is None
+
+ data, dtype_name = _get_data_and_dtype_name(data)
+
+ self.data = data
+ self.dtype = dtype_name
+ self.kind = _dtype_to_kind(dtype_name)
+
+ def take_data(self):
+ """return the data"""
+ return self.data
+
+ @classmethod
+ def _get_atom(cls, values: ArrayLike) -> Col:
+ """
+ Get an appropriately typed and shaped pytables.Col object for values.
+ """
+ dtype = values.dtype
+ # error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no
+ # attribute "itemsize"
+ itemsize = dtype.itemsize # type: ignore[union-attr]
+
+ shape = values.shape
+ if values.ndim == 1:
+ # EA, use block shape pretending it is 2D
+ # TODO(EA2D): not necessary with 2D EAs
+ shape = (1, values.size)
+
+ if isinstance(values, Categorical):
+ codes = values.codes
+ atom = cls.get_atom_data(shape, kind=codes.dtype.name)
+ elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
+ atom = cls.get_atom_datetime64(shape)
+ elif lib.is_np_dtype(dtype, "m"):
+ atom = cls.get_atom_timedelta64(shape)
+ elif is_complex_dtype(dtype):
+ atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
+ elif is_string_dtype(dtype):
+ atom = cls.get_atom_string(shape, itemsize)
+ else:
+ atom = cls.get_atom_data(shape, kind=dtype.name)
+
+ return atom
+
+ @classmethod
+ def get_atom_string(cls, shape, itemsize):
+ return _tables().StringCol(itemsize=itemsize, shape=shape[0])
+
+ @classmethod
+ def get_atom_coltype(cls, kind: str) -> type[Col]:
+ """return the PyTables column class for this column"""
+ if kind.startswith("uint"):
+ k4 = kind[4:]
+ col_name = f"UInt{k4}Col"
+ elif kind.startswith("period"):
+ # we store as integer
+ col_name = "Int64Col"
+ else:
+ kcap = kind.capitalize()
+ col_name = f"{kcap}Col"
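+            # e.g. kind="float64" -> Float64Col, kind="int32" -> Int32Col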
+
+ return getattr(_tables(), col_name)
+
+ @classmethod
+ def get_atom_data(cls, shape, kind: str) -> Col:
+ return cls.get_atom_coltype(kind=kind)(shape=shape[0])
+
+ @classmethod
+ def get_atom_datetime64(cls, shape):
+ return _tables().Int64Col(shape=shape[0])
+
+ @classmethod
+ def get_atom_timedelta64(cls, shape):
+ return _tables().Int64Col(shape=shape[0])
+
+ @property
+ def shape(self):
+ return getattr(self.data, "shape", None)
+
+ @property
+ def cvalues(self):
+ """return my cython values"""
+ return self.data
+
+ def validate_attr(self, append) -> None:
+ """validate that we have the same order as the existing & same dtype"""
+ if append:
+ existing_fields = getattr(self.attrs, self.kind_attr, None)
+ if existing_fields is not None and existing_fields != list(self.values):
+ raise ValueError("appended items do not match existing items in table!")
+
+ existing_dtype = getattr(self.attrs, self.dtype_attr, None)
+ if existing_dtype is not None and existing_dtype != self.dtype:
+ raise ValueError(
+ "appended items dtype do not match existing items dtype in table!"
+ )
+
+ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
+ """
+ Convert the data from this selection to the appropriate pandas type.
+
+ Parameters
+ ----------
+ values : np.ndarray
+ nan_rep :
+ encoding : str
+ errors : str
+
+ Returns
+ -------
+ index : listlike to become an Index
+ data : ndarraylike to become a column
+ """
+ assert isinstance(values, np.ndarray), type(values)
+
+ # values is a recarray
+ if values.dtype.fields is not None:
+ values = values[self.cname]
+
+ assert self.typ is not None
+ if self.dtype is None:
+ # Note: in tests we never have timedelta64 or datetime64,
+ # so the _get_data_and_dtype_name may be unnecessary
+ converted, dtype_name = _get_data_and_dtype_name(values)
+ kind = _dtype_to_kind(dtype_name)
+ else:
+ converted = values
+ dtype_name = self.dtype
+ kind = self.kind
+
+ assert isinstance(converted, np.ndarray) # for mypy
+
+ # use the meta if needed
+ meta = _ensure_decoded(self.meta)
+ metadata = self.metadata
+ ordered = self.ordered
+ tz = self.tz
+
+ assert dtype_name is not None
+ # convert to the correct dtype
+ dtype = _ensure_decoded(dtype_name)
+
+ # reverse converts
+ if dtype.startswith("datetime64"):
+ # recreate with tz if indicated
+ converted = _set_tz(converted, tz, coerce=True)
+
+ elif dtype == "timedelta64":
+ converted = np.asarray(converted, dtype="m8[ns]")
+ elif dtype == "date":
+ try:
+ converted = np.asarray(
+ [date.fromordinal(v) for v in converted], dtype=object
+ )
+ except ValueError:
+ converted = np.asarray(
+ [date.fromtimestamp(v) for v in converted], dtype=object
+ )
+
+ elif meta == "category":
+ # we have a categorical
+ categories = metadata
+ codes = converted.ravel()
+
+ # if we have stored a NaN in the categories
+ # then strip it; in theory we could have BOTH
+ # -1s in the codes and nulls :<
+ if categories is None:
+ # Handle case of NaN-only categorical columns in which case
+ # the categories are an empty array; when this is stored,
+ # pytables cannot write a zero-len array, so on readback
+ # the categories would be None and `read_hdf()` would fail.
+ categories = Index([], dtype=np.float64)
+ else:
+ mask = isna(categories)
+ if mask.any():
+ categories = categories[~mask]
+ codes[codes != -1] -= mask.astype(int).cumsum()._values
+
+ converted = Categorical.from_codes(
+ codes, categories=categories, ordered=ordered, validate=False
+ )
+
+ else:
+ try:
+ converted = converted.astype(dtype, copy=False)
+ except TypeError:
+ converted = converted.astype("O", copy=False)
+
+ # convert nans / decode
+ if _ensure_decoded(kind) == "string":
+ converted = _unconvert_string_array(
+ converted, nan_rep=nan_rep, encoding=encoding, errors=errors
+ )
+
+ return self.values, converted
+
+ def set_attr(self) -> None:
+ """set the data for this column"""
+ setattr(self.attrs, self.kind_attr, self.values)
+ setattr(self.attrs, self.meta_attr, self.meta)
+ assert self.dtype is not None
+ setattr(self.attrs, self.dtype_attr, self.dtype)
+
+
+class DataIndexableCol(DataCol):
+ """represent a data column that can be indexed"""
+
+ is_data_indexable = True
+
+ def validate_names(self) -> None:
+ if not is_string_dtype(Index(self.values).dtype):
+ # TODO: should the message here be more specifically non-str?
+ raise ValueError("cannot have non-object label DataIndexableCol")
+
+ @classmethod
+ def get_atom_string(cls, shape, itemsize):
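+        # indexable columns are 1-D, so no shape argument is passed here
+        # (contrast with DataCol.get_atom_string above)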
+ return _tables().StringCol(itemsize=itemsize)
+
+ @classmethod
+ def get_atom_data(cls, shape, kind: str) -> Col:
+ return cls.get_atom_coltype(kind=kind)()
+
+ @classmethod
+ def get_atom_datetime64(cls, shape):
+ return _tables().Int64Col()
+
+ @classmethod
+ def get_atom_timedelta64(cls, shape):
+ return _tables().Int64Col()
+
+
+class GenericDataIndexableCol(DataIndexableCol):
+ """represent a generic pytables data column"""
+
+
+class Fixed:
+ """
+ represent an object in my store
+ facilitate read/write of various types of objects
+ this is an abstract base class
+
+ Parameters
+ ----------
+ parent : HDFStore
+ group : Node
+ The group node where the table resides.
+ """
+
+ pandas_kind: str
+ format_type: str = "fixed" # GH#30962 needed by dask
+ obj_type: type[DataFrame | Series]
+ ndim: int
+ parent: HDFStore
+ is_table: bool = False
+
+ def __init__(
+ self,
+ parent: HDFStore,
+ group: Node,
+ encoding: str | None = "UTF-8",
+ errors: str = "strict",
+ ) -> None:
+ assert isinstance(parent, HDFStore), type(parent)
+ assert _table_mod is not None # needed for mypy
+ assert isinstance(group, _table_mod.Node), type(group)
+ self.parent = parent
+ self.group = group
+ self.encoding = _ensure_encoding(encoding)
+ self.errors = errors
+
+ @property
+ def is_old_version(self) -> bool:
+ return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
+
+ @property
+ def version(self) -> tuple[int, int, int]:
+ """compute and set our version"""
+ version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
+ try:
+ version = tuple(int(x) for x in version.split("."))
+ if len(version) == 2:
+ version = version + (0,)
+ except AttributeError:
+ version = (0, 0, 0)
+ return version
+
+ @property
+ def pandas_type(self):
+ return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
+
+ def __repr__(self) -> str:
+ """return a pretty representation of myself"""
+ self.infer_axes()
+ s = self.shape
+ if s is not None:
+ if isinstance(s, (list, tuple)):
+ jshape = ",".join([pprint_thing(x) for x in s])
+ s = f"[{jshape}]"
+ return f"{self.pandas_type:12.12} (shape->{s})"
+ return self.pandas_type
+
+ def set_object_info(self) -> None:
+ """set my pandas type & version"""
+ self.attrs.pandas_type = str(self.pandas_kind)
+ self.attrs.pandas_version = str(_version)
+
+ def copy(self) -> Fixed:
+ new_self = copy.copy(self)
+ return new_self
+
+ @property
+ def shape(self):
+ return self.nrows
+
+ @property
+ def pathname(self):
+ return self.group._v_pathname
+
+ @property
+ def _handle(self):
+ return self.parent._handle
+
+ @property
+ def _filters(self):
+ return self.parent._filters
+
+ @property
+ def _complevel(self) -> int:
+ return self.parent._complevel
+
+ @property
+ def _fletcher32(self) -> bool:
+ return self.parent._fletcher32
+
+ @property
+ def attrs(self):
+ return self.group._v_attrs
+
+ def set_attrs(self) -> None:
+ """set our object attributes"""
+
+ def get_attrs(self) -> None:
+ """get our object attributes"""
+
+ @property
+ def storable(self):
+ """return my storable"""
+ return self.group
+
+ @property
+ def is_exists(self) -> bool:
+ return False
+
+ @property
+ def nrows(self):
+ return getattr(self.storable, "nrows", None)
+
+ def validate(self, other) -> Literal[True] | None:
+ """validate against an existing storable"""
+ if other is None:
+ return None
+ return True
+
+ def validate_version(self, where=None) -> None:
+ """are we trying to operate on an old version?"""
+
+ def infer_axes(self) -> bool:
+ """
+        infer the axes of my storer;
+        return a boolean indicating whether we have a valid storer
+ """
+ s = self.storable
+ if s is None:
+ return False
+ self.get_attrs()
+ return True
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ raise NotImplementedError(
+ "cannot read on an abstract storer: subclasses should implement"
+ )
+
+ def write(self, obj, **kwargs) -> None:
+ raise NotImplementedError(
+ "cannot write on an abstract storer: subclasses should implement"
+ )
+
+ def delete(
+ self, where=None, start: int | None = None, stop: int | None = None
+ ) -> None:
+ """
+ support fully deleting the node in its entirety (only) - where
+ specification must be None
+ """
+ if com.all_none(where, start, stop):
+ self._handle.remove_node(self.group, recursive=True)
+ return None
+
+ raise TypeError("cannot delete on an abstract storer")
+
+
+class GenericFixed(Fixed):
+ """a generified fixed version"""
+
+ _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
+ _reverse_index_map = {v: k for k, v in _index_type_map.items()}
+ attributes: list[str] = []
+
+ # indexer helpers
+ def _class_to_alias(self, cls) -> str:
+ return self._index_type_map.get(cls, "")
+
+ def _alias_to_class(self, alias):
+ if isinstance(alias, type): # pragma: no cover
+ # compat: for a short period of time master stored types
+ return alias
+ return self._reverse_index_map.get(alias, Index)
+
+ def _get_index_factory(self, attrs):
+ index_class = self._alias_to_class(
+ _ensure_decoded(getattr(attrs, "index_class", ""))
+ )
+
+ factory: Callable
+
+ if index_class == DatetimeIndex:
+
+ def f(values, freq=None, tz=None):
+ # data are already in UTC, localize and convert if tz present
+ dta = DatetimeArray._simple_new(
+ values.values, dtype=values.dtype, freq=freq
+ )
+ result = DatetimeIndex._simple_new(dta, name=None)
+ if tz is not None:
+ result = result.tz_localize("UTC").tz_convert(tz)
+ return result
+
+ factory = f
+ elif index_class == PeriodIndex:
+
+ def f(values, freq=None, tz=None):
+ dtype = PeriodDtype(freq)
+ parr = PeriodArray._simple_new(values, dtype=dtype)
+ return PeriodIndex._simple_new(parr, name=None)
+
+ factory = f
+ else:
+ factory = index_class
+
+ kwargs = {}
+ if "freq" in attrs:
+ kwargs["freq"] = attrs["freq"]
+ if index_class is Index:
+ # DTI/PI would be gotten by _alias_to_class
+ factory = TimedeltaIndex
+
+ if "tz" in attrs:
+ if isinstance(attrs["tz"], bytes):
+ # created by python2
+ kwargs["tz"] = attrs["tz"].decode("utf-8")
+ else:
+ # created by python3
+ kwargs["tz"] = attrs["tz"]
+ assert index_class is DatetimeIndex # just checking
+
+ return factory, kwargs
+
+ def validate_read(self, columns, where) -> None:
+ """
+ raise if any keywords are passed which are not-None
+ """
+ if columns is not None:
+ raise TypeError(
+ "cannot pass a column specification when reading "
+ "a Fixed format store. this store must be selected in its entirety"
+ )
+ if where is not None:
+ raise TypeError(
+ "cannot pass a where specification when reading "
+ "from a Fixed format store. this store must be selected in its entirety"
+ )
+
+ @property
+ def is_exists(self) -> bool:
+ return True
+
+ def set_attrs(self) -> None:
+ """set our object attributes"""
+ self.attrs.encoding = self.encoding
+ self.attrs.errors = self.errors
+
+ def get_attrs(self) -> None:
+ """retrieve our attributes"""
+ self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
+ self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
+ for n in self.attributes:
+ setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
+
+ def write(self, obj, **kwargs) -> None:
+ self.set_attrs()
+
+ def read_array(self, key: str, start: int | None = None, stop: int | None = None):
+ """read an array for the specified node (off of group"""
+ import tables
+
+ node = getattr(self.group, key)
+ attrs = node._v_attrs
+
+ transposed = getattr(attrs, "transposed", False)
+
+ if isinstance(node, tables.VLArray):
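+            # object-dtype data was written as a single VLArray row (see write_array)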
+ ret = node[0][start:stop]
+ else:
+ dtype = _ensure_decoded(getattr(attrs, "value_type", None))
+ shape = getattr(attrs, "shape", None)
+
+ if shape is not None:
+ # length 0 axis
+ ret = np.empty(shape, dtype=dtype)
+ else:
+ ret = node[start:stop]
+
+ if dtype and dtype.startswith("datetime64"):
+ # reconstruct a timezone if indicated
+ tz = getattr(attrs, "tz", None)
+ ret = _set_tz(ret, tz, coerce=True)
+
+ elif dtype == "timedelta64":
+ ret = np.asarray(ret, dtype="m8[ns]")
+
+ if transposed:
+ return ret.T
+ else:
+ return ret
+
+ def read_index(
+ self, key: str, start: int | None = None, stop: int | None = None
+ ) -> Index:
+ variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
+
+ if variety == "multi":
+ return self.read_multi_index(key, start=start, stop=stop)
+ elif variety == "regular":
+ node = getattr(self.group, key)
+ index = self.read_index_node(node, start=start, stop=stop)
+ return index
+ else: # pragma: no cover
+ raise TypeError(f"unrecognized index variety: {variety}")
+
+ def write_index(self, key: str, index: Index) -> None:
+ if isinstance(index, MultiIndex):
+ setattr(self.attrs, f"{key}_variety", "multi")
+ self.write_multi_index(key, index)
+ else:
+ setattr(self.attrs, f"{key}_variety", "regular")
+ converted = _convert_index("index", index, self.encoding, self.errors)
+
+ self.write_array(key, converted.values)
+
+ node = getattr(self.group, key)
+ node._v_attrs.kind = converted.kind
+ node._v_attrs.name = index.name
+
+ if isinstance(index, (DatetimeIndex, PeriodIndex)):
+ node._v_attrs.index_class = self._class_to_alias(type(index))
+
+ if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ node._v_attrs.freq = index.freq
+
+ if isinstance(index, DatetimeIndex) and index.tz is not None:
+ node._v_attrs.tz = _get_tz(index.tz)
+
+ def write_multi_index(self, key: str, index: MultiIndex) -> None:
+ setattr(self.attrs, f"{key}_nlevels", index.nlevels)
+
+ for i, (lev, level_codes, name) in enumerate(
+ zip(index.levels, index.codes, index.names)
+ ):
+ # write the level
+ if isinstance(lev.dtype, ExtensionDtype):
+ raise NotImplementedError(
+ "Saving a MultiIndex with an extension dtype is not supported."
+ )
+ level_key = f"{key}_level{i}"
+ conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
+ self.write_array(level_key, conv_level.values)
+ node = getattr(self.group, level_key)
+ node._v_attrs.kind = conv_level.kind
+ node._v_attrs.name = name
+
+ # write the name
+ setattr(node._v_attrs, f"{key}_name{name}", name)
+
+ # write the labels
+ label_key = f"{key}_label{i}"
+ self.write_array(label_key, level_codes)
+
+ def read_multi_index(
+ self, key: str, start: int | None = None, stop: int | None = None
+ ) -> MultiIndex:
+ nlevels = getattr(self.attrs, f"{key}_nlevels")
+
+ levels = []
+ codes = []
+ names: list[Hashable] = []
+ for i in range(nlevels):
+ level_key = f"{key}_level{i}"
+ node = getattr(self.group, level_key)
+ lev = self.read_index_node(node, start=start, stop=stop)
+ levels.append(lev)
+ names.append(lev.name)
+
+ label_key = f"{key}_label{i}"
+ level_codes = self.read_array(label_key, start=start, stop=stop)
+ codes.append(level_codes)
+
+ return MultiIndex(
+ levels=levels, codes=codes, names=names, verify_integrity=True
+ )
+
+ def read_index_node(
+ self, node: Node, start: int | None = None, stop: int | None = None
+ ) -> Index:
+ data = node[start:stop]
+ # If the index was an empty array write_array_empty() will
+ # have written a sentinel. Here we replace it with the original.
+ if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
+ data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
+ kind = _ensure_decoded(node._v_attrs.kind)
+ name = None
+
+ if "name" in node._v_attrs:
+ name = _ensure_str(node._v_attrs.name)
+ name = _ensure_decoded(name)
+
+ attrs = node._v_attrs
+ factory, kwargs = self._get_index_factory(attrs)
+
+ if kind in ("date", "object"):
+ index = factory(
+ _unconvert_index(
+ data, kind, encoding=self.encoding, errors=self.errors
+ ),
+ dtype=object,
+ **kwargs,
+ )
+ else:
+ index = factory(
+ _unconvert_index(
+ data, kind, encoding=self.encoding, errors=self.errors
+ ),
+ **kwargs,
+ )
+
+ index.name = name
+
+ return index
+
+ def write_array_empty(self, key: str, value: ArrayLike) -> None:
+ """write a 0-len array"""
+ # ugly hack for length 0 axes
+ arr = np.empty((1,) * value.ndim)
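+        # a 1-element placeholder is written; the true shape/dtype are stored in
+        # the node attrs and used by read_array/read_index_node to reconstruct it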
+ self._handle.create_array(self.group, key, arr)
+ node = getattr(self.group, key)
+ node._v_attrs.value_type = str(value.dtype)
+ node._v_attrs.shape = value.shape
+
+ def write_array(
+ self, key: str, obj: AnyArrayLike, items: Index | None = None
+ ) -> None:
+ # TODO: we only have a few tests that get here, the only EA
+ # that gets passed is DatetimeArray, and we never have
+ # both self._filters and EA
+
+ value = extract_array(obj, extract_numpy=True)
+
+ if key in self.group:
+ self._handle.remove_node(self.group, key)
+
+ # Transform needed to interface with pytables row/col notation
+ empty_array = value.size == 0
+ transposed = False
+
+ if isinstance(value.dtype, CategoricalDtype):
+ raise NotImplementedError(
+ "Cannot store a category dtype in a HDF5 dataset that uses format="
+ '"fixed". Use format="table".'
+ )
+ if not empty_array:
+ if hasattr(value, "T"):
+ # ExtensionArrays (1d) may not have transpose.
+ value = value.T
+ transposed = True
+
+ atom = None
+ if self._filters is not None:
+ with suppress(ValueError):
+ # get the atom for this datatype
+ atom = _tables().Atom.from_dtype(value.dtype)
+
+ if atom is not None:
+ # We only get here if self._filters is non-None and
+ # the Atom.from_dtype call succeeded
+
+ # create an empty chunked array and fill it from value
+ if not empty_array:
+ ca = self._handle.create_carray(
+ self.group, key, atom, value.shape, filters=self._filters
+ )
+ ca[:] = value
+
+ else:
+ self.write_array_empty(key, value)
+
+ elif value.dtype.type == np.object_:
+ # infer the type, warn if we have a non-string type here (for
+ # performance)
+ inferred_type = lib.infer_dtype(value, skipna=False)
+ if empty_array:
+ pass
+ elif inferred_type == "string":
+ pass
+ else:
+ ws = performance_doc % (inferred_type, key, items)
+ warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level())
+
+ vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
+ vlarr.append(value)
+
+ elif lib.is_np_dtype(value.dtype, "M"):
+ self._handle.create_array(self.group, key, value.view("i8"))
+ getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
+ elif isinstance(value.dtype, DatetimeTZDtype):
+ # store as UTC
+ # with a zone
+
+ # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
+ # attribute "asi8"
+ self._handle.create_array(
+ self.group, key, value.asi8 # type: ignore[union-attr]
+ )
+
+ node = getattr(self.group, key)
+ # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
+ # attribute "tz"
+ node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr]
+ node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]"
+ elif lib.is_np_dtype(value.dtype, "m"):
+ self._handle.create_array(self.group, key, value.view("i8"))
+ getattr(self.group, key)._v_attrs.value_type = "timedelta64"
+ elif empty_array:
+ self.write_array_empty(key, value)
+ else:
+ self._handle.create_array(self.group, key, value)
+
+ getattr(self.group, key)._v_attrs.transposed = transposed
+
+
+class SeriesFixed(GenericFixed):
+ pandas_kind = "series"
+ attributes = ["name"]
+
+ name: Hashable
+
+ @property
+ def shape(self):
+ try:
+ return (len(self.group.values),)
+ except (TypeError, AttributeError):
+ return None
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ) -> Series:
+ self.validate_read(columns, where)
+ index = self.read_index("index", start=start, stop=stop)
+ values = self.read_array("values", start=start, stop=stop)
+ result = Series(values, index=index, name=self.name, copy=False)
+ if using_pyarrow_string_dtype() and is_string_array(values, skipna=True):
+ result = result.astype("string[pyarrow_numpy]")
+ return result
+
+ def write(self, obj, **kwargs) -> None:
+ super().write(obj, **kwargs)
+ self.write_index("index", obj.index)
+ self.write_array("values", obj)
+ self.attrs.name = obj.name
+
+
+class BlockManagerFixed(GenericFixed):
+ attributes = ["ndim", "nblocks"]
+
+ nblocks: int
+
+ @property
+ def shape(self) -> Shape | None:
+ try:
+ ndim = self.ndim
+
+ # items
+ items = 0
+ for i in range(self.nblocks):
+ node = getattr(self.group, f"block{i}_items")
+ shape = getattr(node, "shape", None)
+ if shape is not None:
+ items += shape[0]
+
+ # data shape
+ node = self.group.block0_values
+ shape = getattr(node, "shape", None)
+ if shape is not None:
+ shape = list(shape[0 : (ndim - 1)])
+ else:
+ shape = []
+
+ shape.append(items)
+
+ return shape
+ except AttributeError:
+ return None
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ) -> DataFrame:
+ # start, stop applied to rows, so 0th axis only
+ self.validate_read(columns, where)
+ select_axis = self.obj_type()._get_block_manager_axis(0)
+
+ axes = []
+ for i in range(self.ndim):
+ _start, _stop = (start, stop) if i == select_axis else (None, None)
+ ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
+ axes.append(ax)
+
+ items = axes[0]
+ dfs = []
+
+ for i in range(self.nblocks):
+ blk_items = self.read_index(f"block{i}_items")
+ values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
+
+ columns = items[items.get_indexer(blk_items)]
+ df = DataFrame(values.T, columns=columns, index=axes[1], copy=False)
+ if using_pyarrow_string_dtype() and is_string_array(values, skipna=True):
+ df = df.astype("string[pyarrow_numpy]")
+ dfs.append(df)
+
+ if len(dfs) > 0:
+ out = concat(dfs, axis=1, copy=True)
+ if using_copy_on_write():
+ # with CoW, concat ignores the copy keyword. Here, we still want
+ # to copy to enforce optimized column-major layout
+ out = out.copy()
+ out = out.reindex(columns=items, copy=False)
+ return out
+
+ return DataFrame(columns=axes[0], index=axes[1])
+
+ def write(self, obj, **kwargs) -> None:
+ super().write(obj, **kwargs)
+
+ # TODO(ArrayManager) HDFStore relies on accessing the blocks
+ if isinstance(obj._mgr, ArrayManager):
+ obj = obj._as_manager("block")
+
+ data = obj._mgr
+ if not data.is_consolidated():
+ data = data.consolidate()
+
+ self.attrs.ndim = data.ndim
+ for i, ax in enumerate(data.axes):
+ if i == 0 and (not ax.is_unique):
+ raise ValueError("Columns index has to be unique for fixed format")
+ self.write_index(f"axis{i}", ax)
+
+ # Supporting mixed-type DataFrame objects...nontrivial
+ self.attrs.nblocks = len(data.blocks)
+ for i, blk in enumerate(data.blocks):
+ # I have no idea why, but writing values before items fixed #2299
+ blk_items = data.items.take(blk.mgr_locs)
+ self.write_array(f"block{i}_values", blk.values, items=blk_items)
+ self.write_index(f"block{i}_items", blk_items)
+
+
+class FrameFixed(BlockManagerFixed):
+ pandas_kind = "frame"
+ obj_type = DataFrame
+
+
+class Table(Fixed):
+ """
+ represent a table:
+ facilitate read/write of various types of tables
+
+ Attrs in Table Node
+ -------------------
+    These are attributes that are stored in the main table node; they are
+    necessary to recreate these tables when read back in.
+
+ index_axes : a list of tuples of the (original indexing axis and
+ index column)
+ non_index_axes: a list of tuples of the (original index axis and
+ columns on a non-indexing axis)
+ values_axes : a list of the columns which comprise the data of this
+ table
+ data_columns : a list of the columns that we are allowing indexing
+ (these become single columns in values_axes)
+ nan_rep : the string to use for nan representations for string
+ objects
+ levels : the names of levels
+ metadata : the names of the metadata columns
+ """
+
+ pandas_kind = "wide_table"
+ format_type: str = "table" # GH#30962 needed by dask
+ table_type: str
+ levels: int | list[Hashable] = 1
+ is_table = True
+
+ metadata: list
+
+ def __init__(
+ self,
+ parent: HDFStore,
+ group: Node,
+ encoding: str | None = None,
+ errors: str = "strict",
+ index_axes: list[IndexCol] | None = None,
+ non_index_axes: list[tuple[AxisInt, Any]] | None = None,
+ values_axes: list[DataCol] | None = None,
+ data_columns: list | None = None,
+ info: dict | None = None,
+ nan_rep=None,
+ ) -> None:
+ super().__init__(parent, group, encoding=encoding, errors=errors)
+ self.index_axes = index_axes or []
+ self.non_index_axes = non_index_axes or []
+ self.values_axes = values_axes or []
+ self.data_columns = data_columns or []
+ self.info = info or {}
+ self.nan_rep = nan_rep
+
+ @property
+ def table_type_short(self) -> str:
+ return self.table_type.split("_")[0]
+
+ def __repr__(self) -> str:
+ """return a pretty representation of myself"""
+ self.infer_axes()
+ jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
+ dc = f",dc->[{jdc}]"
+
+ ver = ""
+ if self.is_old_version:
+ jver = ".".join([str(x) for x in self.version])
+ ver = f"[{jver}]"
+
+ jindex_axes = ",".join([a.name for a in self.index_axes])
+ return (
+ f"{self.pandas_type:12.12}{ver} "
+ f"(typ->{self.table_type_short},nrows->{self.nrows},"
+ f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
+ )
+
+ def __getitem__(self, c: str):
+ """return the axis for c"""
+ for a in self.axes:
+ if c == a.name:
+ return a
+ return None
+
+ def validate(self, other) -> None:
+ """validate against an existing table"""
+ if other is None:
+ return
+
+ if other.table_type != self.table_type:
+ raise TypeError(
+ "incompatible table_type with existing "
+ f"[{other.table_type} - {self.table_type}]"
+ )
+
+ for c in ["index_axes", "non_index_axes", "values_axes"]:
+ sv = getattr(self, c, None)
+ ov = getattr(other, c, None)
+ if sv != ov:
+ # show the error for the specific axes
+ # Argument 1 to "enumerate" has incompatible type
+ # "Optional[Any]"; expected "Iterable[Any]" [arg-type]
+ for i, sax in enumerate(sv): # type: ignore[arg-type]
+ # Value of type "Optional[Any]" is not indexable [index]
+ oax = ov[i] # type: ignore[index]
+ if sax != oax:
+ raise ValueError(
+ f"invalid combination of [{c}] on appending data "
+ f"[{sax}] vs current table [{oax}]"
+ )
+
+ # should never get here
+ raise Exception(
+ f"invalid combination of [{c}] on appending data [{sv}] vs "
+ f"current table [{ov}]"
+ )
+
+ @property
+ def is_multi_index(self) -> bool:
+ """the levels attribute is 1 or a list in the case of a multi-index"""
+ return isinstance(self.levels, list)
+
+ def validate_multiindex(
+ self, obj: DataFrame | Series
+ ) -> tuple[DataFrame, list[Hashable]]:
+ """
+ validate that we can store the multi-index; reset and return the
+ new object
+ """
+ levels = com.fill_missing_names(obj.index.names)
+ try:
+ reset_obj = obj.reset_index()
+ except ValueError as err:
+ raise ValueError(
+ "duplicate names/columns in the multi-index when storing as a table"
+ ) from err
+ assert isinstance(reset_obj, DataFrame) # for mypy
+ return reset_obj, levels
+
+ @property
+ def nrows_expected(self) -> int:
+ """based on our axes, compute the expected nrows"""
+ return np.prod([i.cvalues.shape[0] for i in self.index_axes])
+
+ @property
+ def is_exists(self) -> bool:
+ """has this table been created"""
+ return "table" in self.group
+
+ @property
+ def storable(self):
+ return getattr(self.group, "table", None)
+
+ @property
+ def table(self):
+ """return the table group (this is my storable)"""
+ return self.storable
+
+ @property
+ def dtype(self):
+ return self.table.dtype
+
+ @property
+ def description(self):
+ return self.table.description
+
+ @property
+ def axes(self) -> itertools.chain[IndexCol]:
+ return itertools.chain(self.index_axes, self.values_axes)
+
+ @property
+ def ncols(self) -> int:
+ """the number of total columns in the values axes"""
+ return sum(len(a.values) for a in self.values_axes)
+
+ @property
+ def is_transposed(self) -> bool:
+ return False
+
+ @property
+ def data_orientation(self) -> tuple[int, ...]:
+ """return a tuple of my permutated axes, non_indexable at the front"""
+ return tuple(
+ itertools.chain(
+ [int(a[0]) for a in self.non_index_axes],
+ [int(a.axis) for a in self.index_axes],
+ )
+ )
+
+ def queryables(self) -> dict[str, Any]:
+ """return a dict of the kinds allowable columns for this object"""
+ # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
+ axis_names = {0: "index", 1: "columns"}
+
+ # compute the values_axes queryables
+ d1 = [(a.cname, a) for a in self.index_axes]
+ d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
+ d3 = [
+ (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
+ ]
+
+ return dict(d1 + d2 + d3)
+
+ def index_cols(self):
+ """return a list of my index cols"""
+ # Note: each `i.cname` below is assured to be a str.
+ return [(i.axis, i.cname) for i in self.index_axes]
+
+ def values_cols(self) -> list[str]:
+ """return a list of my values cols"""
+ return [i.cname for i in self.values_axes]
+
+ def _get_metadata_path(self, key: str) -> str:
+ """return the metadata pathname for this key"""
+ group = self.group._v_pathname
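+        # e.g. a table stored at "/df" with key "col" -> "/df/meta/col/meta"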
+ return f"{group}/meta/{key}/meta"
+
+ def write_metadata(self, key: str, values: np.ndarray) -> None:
+ """
+        Write out a metadata array to the key as a table-format Series.
+
+ Parameters
+ ----------
+ key : str
+ values : ndarray
+ """
+ self.parent.put(
+ self._get_metadata_path(key),
+ Series(values, copy=False),
+ format="table",
+ encoding=self.encoding,
+ errors=self.errors,
+ nan_rep=self.nan_rep,
+ )
+
+ def read_metadata(self, key: str):
+ """return the meta data array for this key"""
+ if getattr(getattr(self.group, "meta", None), key, None) is not None:
+ return self.parent.select(self._get_metadata_path(key))
+ return None
+
+ def set_attrs(self) -> None:
+ """set our table type & indexables"""
+ self.attrs.table_type = str(self.table_type)
+ self.attrs.index_cols = self.index_cols()
+ self.attrs.values_cols = self.values_cols()
+ self.attrs.non_index_axes = self.non_index_axes
+ self.attrs.data_columns = self.data_columns
+ self.attrs.nan_rep = self.nan_rep
+ self.attrs.encoding = self.encoding
+ self.attrs.errors = self.errors
+ self.attrs.levels = self.levels
+ self.attrs.info = self.info
+
+ def get_attrs(self) -> None:
+ """retrieve our attributes"""
+ self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
+ self.data_columns = getattr(self.attrs, "data_columns", None) or []
+ self.info = getattr(self.attrs, "info", None) or {}
+ self.nan_rep = getattr(self.attrs, "nan_rep", None)
+ self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
+ self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
+ self.levels: list[Hashable] = getattr(self.attrs, "levels", None) or []
+ self.index_axes = [a for a in self.indexables if a.is_an_indexable]
+ self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
+
+ def validate_version(self, where=None) -> None:
+ """are we trying to operate on an old version?"""
+ if where is not None:
+ if self.is_old_version:
+ ws = incompatibility_doc % ".".join([str(x) for x in self.version])
+ warnings.warn(
+ ws,
+ IncompatibilityWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ def validate_min_itemsize(self, min_itemsize) -> None:
+ """
+        validate that min_itemsize doesn't contain items that are not in the
+        axes; this needs data_columns to be defined
+ """
+ if min_itemsize is None:
+ return
+ if not isinstance(min_itemsize, dict):
+ return
+
+ q = self.queryables()
+ for k in min_itemsize:
+ # ok, apply generally
+ if k == "values":
+ continue
+ if k not in q:
+ raise ValueError(
+ f"min_itemsize has the key [{k}] which is not an axis or "
+ "data_column"
+ )
+
+ @cache_readonly
+ def indexables(self):
+ """create/cache the indexables if they don't exist"""
+ _indexables = []
+
+ desc = self.description
+ table_attrs = self.table.attrs
+
+ # Note: each of the `name` kwargs below are str, ensured
+ # by the definition in index_cols.
+ # index columns
+ for i, (axis, name) in enumerate(self.attrs.index_cols):
+ atom = getattr(desc, name)
+ md = self.read_metadata(name)
+ meta = "category" if md is not None else None
+
+ kind_attr = f"{name}_kind"
+ kind = getattr(table_attrs, kind_attr, None)
+
+ index_col = IndexCol(
+ name=name,
+ axis=axis,
+ pos=i,
+ kind=kind,
+ typ=atom,
+ table=self.table,
+ meta=meta,
+ metadata=md,
+ )
+ _indexables.append(index_col)
+
+ # values columns
+ dc = set(self.data_columns)
+ base_pos = len(_indexables)
+
+ def f(i, c):
+ assert isinstance(c, str)
+ klass = DataCol
+ if c in dc:
+ klass = DataIndexableCol
+
+ atom = getattr(desc, c)
+ adj_name = _maybe_adjust_name(c, self.version)
+
+ # TODO: why kind_attr here?
+ values = getattr(table_attrs, f"{adj_name}_kind", None)
+ dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
+ # Argument 1 to "_dtype_to_kind" has incompatible type
+ # "Optional[Any]"; expected "str" [arg-type]
+ kind = _dtype_to_kind(dtype) # type: ignore[arg-type]
+
+ md = self.read_metadata(c)
+            # TODO: figure out why these two versions of `meta` don't always match.
+ # meta = "category" if md is not None else None
+ meta = getattr(table_attrs, f"{adj_name}_meta", None)
+
+ obj = klass(
+ name=adj_name,
+ cname=c,
+ values=values,
+ kind=kind,
+ pos=base_pos + i,
+ typ=atom,
+ table=self.table,
+ meta=meta,
+ metadata=md,
+ dtype=dtype,
+ )
+ return obj
+
+ # Note: the definition of `values_cols` ensures that each
+ # `c` below is a str.
+ _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
+
+ return _indexables
+
+ def create_index(
+ self, columns=None, optlevel=None, kind: str | None = None
+ ) -> None:
+ """
+ Create a pytables index on the specified columns.
+
+ Parameters
+ ----------
+ columns : None, bool, or listlike[str]
+ Indicate which columns to create an index on.
+
+ * False : Do not create any indexes.
+ * True : Create indexes on all columns.
+ * None : Create indexes on all columns.
+ * listlike : Create indexes on the given columns.
+
+ optlevel : int or None, default None
+            Optimization level; if None, pytables defaults to 6.
+        kind : str or None, default None
+            Kind of index; if None, pytables defaults to "medium".
+
+ Raises
+ ------
+ TypeError if trying to create an index on a complex-type column.
+
+ Notes
+ -----
+ Cannot index Time64Col or ComplexCol.
+ Pytables must be >= 3.0.
+ """
+ if not self.infer_axes():
+ return
+ if columns is False:
+ return
+
+ # index all indexables and data_columns
+ if columns is None or columns is True:
+ columns = [a.cname for a in self.axes if a.is_data_indexable]
+ if not isinstance(columns, (tuple, list)):
+ columns = [columns]
+
+ kw = {}
+ if optlevel is not None:
+ kw["optlevel"] = optlevel
+ if kind is not None:
+ kw["kind"] = kind
+
+ table = self.table
+ for c in columns:
+ v = getattr(table.cols, c, None)
+ if v is not None:
+ # remove the index if the kind/optlevel have changed
+ if v.is_indexed:
+ index = v.index
+ cur_optlevel = index.optlevel
+ cur_kind = index.kind
+
+ if kind is not None and cur_kind != kind:
+ v.remove_index()
+ else:
+ kw["kind"] = cur_kind
+
+ if optlevel is not None and cur_optlevel != optlevel:
+ v.remove_index()
+ else:
+ kw["optlevel"] = cur_optlevel
+
+ # create the index
+ if not v.is_indexed:
+ if v.type.startswith("complex"):
+ raise TypeError(
+ "Columns containing complex values can be stored but "
+ "cannot be indexed when using table format. Either use "
+ "fixed format, set index=False, or do not include "
+ "the columns containing complex values to "
+ "data_columns when initializing the table."
+ )
+ v.create_index(**kw)
+ elif c in self.non_index_axes[0][1]:
+ # GH 28156
+ raise AttributeError(
+ f"column {c} is not a data_column.\n"
+ f"In order to read column {c} you must reload the dataframe \n"
+ f"into HDFStore and include {c} with the data_columns argument."
+ )
+
+ def _read_axes(
+ self, where, start: int | None = None, stop: int | None = None
+ ) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]:
+ """
+ Create the axes sniffed from the table.
+
+ Parameters
+ ----------
+ where : ???
+ start : int or None, default None
+ stop : int or None, default None
+
+ Returns
+ -------
+ List[Tuple[index_values, column_values]]
+ """
+ # create the selection
+ selection = Selection(self, where=where, start=start, stop=stop)
+ values = selection.select()
+
+ results = []
+ # convert the data
+ for a in self.axes:
+ a.set_info(self.info)
+ res = a.convert(
+ values,
+ nan_rep=self.nan_rep,
+ encoding=self.encoding,
+ errors=self.errors,
+ )
+ results.append(res)
+
+ return results
+
+ @classmethod
+ def get_object(cls, obj, transposed: bool):
+ """return the data for this obj"""
+ return obj
+
+ def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
+ """
+        take the input data_columns and min_itemsize and create a data
+        columns spec
+ """
+ if not len(non_index_axes):
+ return []
+
+ axis, axis_labels = non_index_axes[0]
+ info = self.info.get(axis, {})
+ if info.get("type") == "MultiIndex" and data_columns:
+ raise ValueError(
+ f"cannot use a multi-index on axis [{axis}] with "
+ f"data_columns {data_columns}"
+ )
+
+ # evaluate the passed data_columns, True == use all columns
+ # take only valid axis labels
+ if data_columns is True:
+ data_columns = list(axis_labels)
+ elif data_columns is None:
+ data_columns = []
+
+ # if min_itemsize is a dict, add the keys (exclude 'values')
+ if isinstance(min_itemsize, dict):
+ existing_data_columns = set(data_columns)
+ data_columns = list(data_columns) # ensure we do not modify
+ data_columns.extend(
+ [
+ k
+ for k in min_itemsize.keys()
+ if k != "values" and k not in existing_data_columns
+ ]
+ )
+
+ # return valid columns in the order of our axis
+ return [c for c in data_columns if c in axis_labels]
+
+ def _create_axes(
+ self,
+ axes,
+ obj: DataFrame,
+ validate: bool = True,
+ nan_rep=None,
+ data_columns=None,
+ min_itemsize=None,
+ ):
+ """
+ Create and return the axes.
+
+ Parameters
+ ----------
+ axes: list or None
+ The names or numbers of the axes to create.
+ obj : DataFrame
+ The object to create axes on.
+ validate: bool, default True
+ Whether to validate the obj against an existing object already written.
+ nan_rep :
+ A value to use for string column nan_rep.
+ data_columns : List[str], True, or None, default None
+ Specify the columns that we want to create to allow indexing on.
+
+ * True : Use all available columns.
+ * None : Use no columns.
+ * List[str] : Use the specified columns.
+
+ min_itemsize: Dict[str, int] or None, default None
+ The min itemsize for a column in bytes.
+ """
+ if not isinstance(obj, DataFrame):
+ group = self.group._v_name
+ raise TypeError(
+ f"cannot properly create the storer for: [group->{group},"
+ f"value->{type(obj)}]"
+ )
+
+ # set the default axes if needed
+ if axes is None:
+ axes = [0]
+
+ # map axes to numbers
+ axes = [obj._get_axis_number(a) for a in axes]
+
+ # do we have an existing table (if so, use its axes & data_columns)
+ if self.infer_axes():
+ table_exists = True
+ axes = [a.axis for a in self.index_axes]
+ data_columns = list(self.data_columns)
+ nan_rep = self.nan_rep
+ # TODO: do we always have validate=True here?
+ else:
+ table_exists = False
+
+ new_info = self.info
+
+ assert self.ndim == 2 # with next check, we must have len(axes) == 1
+ # currently support on ndim-1 axes
+ if len(axes) != self.ndim - 1:
+ raise ValueError(
+ "currently only support ndim-1 indexers in an AppendableTable"
+ )
+
+ # create according to the new data
+ new_non_index_axes: list = []
+
+ # nan_representation
+ if nan_rep is None:
+ nan_rep = "nan"
+
+ # We construct the non-index-axis first, since that alters new_info
+ idx = next(x for x in [0, 1] if x not in axes)
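+        # idx is the lone non-index axis (the columns axis when indexing on axis 0)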
+
+ a = obj.axes[idx]
+ # we might be able to change the axes on the appending data if necessary
+ append_axis = list(a)
+ if table_exists:
+ indexer = len(new_non_index_axes) # i.e. 0
+ exist_axis = self.non_index_axes[indexer][1]
+ if not array_equivalent(
+ np.array(append_axis),
+ np.array(exist_axis),
+ strict_nan=True,
+ dtype_equal=True,
+ ):
+ # ahah! -> reindex
+ if array_equivalent(
+ np.array(sorted(append_axis)),
+ np.array(sorted(exist_axis)),
+ strict_nan=True,
+ dtype_equal=True,
+ ):
+ append_axis = exist_axis
+
+ # the non_index_axes info
+ info = new_info.setdefault(idx, {})
+ info["names"] = list(a.names)
+ info["type"] = type(a).__name__
+
+ new_non_index_axes.append((idx, append_axis))
+
+ # Now we can construct our new index axis
+ idx = axes[0]
+ a = obj.axes[idx]
+ axis_name = obj._get_axis_name(idx)
+ new_index = _convert_index(axis_name, a, self.encoding, self.errors)
+ new_index.axis = idx
+
+ # Because we are always 2D, there is only one new_index, so
+ # we know it will have pos=0
+ new_index.set_pos(0)
+ new_index.update_info(new_info)
+ new_index.maybe_set_size(min_itemsize) # check for column conflicts
+
+ new_index_axes = [new_index]
+ j = len(new_index_axes) # i.e. 1
+ assert j == 1
+
+ # reindex by our non_index_axes & compute data_columns
+ assert len(new_non_index_axes) == 1
+ for a in new_non_index_axes:
+ obj = _reindex_axis(obj, a[0], a[1])
+
+ transposed = new_index.axis == 1
+
+ # figure out data_columns and get out blocks
+ data_columns = self.validate_data_columns(
+ data_columns, min_itemsize, new_non_index_axes
+ )
+
+ frame = self.get_object(obj, transposed)._consolidate()
+
+ blocks, blk_items = self._get_blocks_and_items(
+ frame, table_exists, new_non_index_axes, self.values_axes, data_columns
+ )
+
+ # add my values
+ vaxes = []
+ for i, (blk, b_items) in enumerate(zip(blocks, blk_items)):
+ # shape of the data column are the indexable axes
+ klass = DataCol
+ name = None
+
+ # we have a data_column
+ if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
+ klass = DataIndexableCol
+ name = b_items[0]
+ if not (name is None or isinstance(name, str)):
+ # TODO: should the message here be more specifically non-str?
+ raise ValueError("cannot have non-object label DataIndexableCol")
+
+ # make sure that we match up the existing columns
+ # if we have an existing table
+ existing_col: DataCol | None
+
+ if table_exists and validate:
+ try:
+ existing_col = self.values_axes[i]
+ except (IndexError, KeyError) as err:
+ raise ValueError(
+ f"Incompatible appended table [{blocks}]"
+ f"with existing table [{self.values_axes}]"
+ ) from err
+ else:
+ existing_col = None
+
+ new_name = name or f"values_block_{i}"
+ data_converted = _maybe_convert_for_string_atom(
+ new_name,
+ blk.values,
+ existing_col=existing_col,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ encoding=self.encoding,
+ errors=self.errors,
+ columns=b_items,
+ )
+ adj_name = _maybe_adjust_name(new_name, self.version)
+
+ typ = klass._get_atom(data_converted)
+ kind = _dtype_to_kind(data_converted.dtype.name)
+ tz = None
+ if getattr(data_converted, "tz", None) is not None:
+ tz = _get_tz(data_converted.tz)
+
+ meta = metadata = ordered = None
+ if isinstance(data_converted.dtype, CategoricalDtype):
+ ordered = data_converted.ordered
+ meta = "category"
+ metadata = np.asarray(data_converted.categories).ravel()
+
+ data, dtype_name = _get_data_and_dtype_name(data_converted)
+
+ col = klass(
+ name=adj_name,
+ cname=new_name,
+ values=list(b_items),
+ typ=typ,
+ pos=j,
+ kind=kind,
+ tz=tz,
+ ordered=ordered,
+ meta=meta,
+ metadata=metadata,
+ dtype=dtype_name,
+ data=data,
+ )
+ col.update_info(new_info)
+
+ vaxes.append(col)
+
+ j += 1
+
+ dcs = [col.name for col in vaxes if col.is_data_indexable]
+
+ new_table = type(self)(
+ parent=self.parent,
+ group=self.group,
+ encoding=self.encoding,
+ errors=self.errors,
+ index_axes=new_index_axes,
+ non_index_axes=new_non_index_axes,
+ values_axes=vaxes,
+ data_columns=dcs,
+ info=new_info,
+ nan_rep=nan_rep,
+ )
+ if hasattr(self, "levels"):
+ # TODO: get this into constructor, only for appropriate subclass
+ new_table.levels = self.levels
+
+ new_table.validate_min_itemsize(min_itemsize)
+
+ if validate and table_exists:
+ new_table.validate(self)
+
+ return new_table
+
+ @staticmethod
+ def _get_blocks_and_items(
+ frame: DataFrame,
+ table_exists: bool,
+ new_non_index_axes,
+ values_axes,
+ data_columns,
+ ):
+ # Helper to clarify non-state-altering parts of _create_axes
+
+ # TODO(ArrayManager) HDFStore relies on accessing the blocks
+ if isinstance(frame._mgr, ArrayManager):
+ frame = frame._as_manager("block")
+
+ def get_blk_items(mgr):
+ return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
+
+ mgr = frame._mgr
+ mgr = cast(BlockManager, mgr)
+ blocks: list[Block] = list(mgr.blocks)
+ blk_items: list[Index] = get_blk_items(mgr)
+
+ if len(data_columns):
+ # TODO: prove that we only get here with axis == 1?
+ # It is the case in all extant tests, but NOT the case
+ # outside this `if len(data_columns)` check.
+
+ axis, axis_labels = new_non_index_axes[0]
+ new_labels = Index(axis_labels).difference(Index(data_columns))
+ mgr = frame.reindex(new_labels, axis=axis)._mgr
+ mgr = cast(BlockManager, mgr)
+
+ blocks = list(mgr.blocks)
+ blk_items = get_blk_items(mgr)
+ for c in data_columns:
+ # This reindex would raise ValueError if we had a duplicate
+ # index, so we can infer that (as long as axis==1) we
+ # get a single column back, so a single block.
+ mgr = frame.reindex([c], axis=axis)._mgr
+ mgr = cast(BlockManager, mgr)
+ blocks.extend(mgr.blocks)
+ blk_items.extend(get_blk_items(mgr))
+
+ # reorder the blocks in the same order as the existing table if we can
+ if table_exists:
+ by_items = {
+ tuple(b_items.tolist()): (b, b_items)
+ for b, b_items in zip(blocks, blk_items)
+ }
+ new_blocks: list[Block] = []
+ new_blk_items = []
+ for ea in values_axes:
+ items = tuple(ea.values)
+ try:
+ b, b_items = by_items.pop(items)
+ new_blocks.append(b)
+ new_blk_items.append(b_items)
+ except (IndexError, KeyError) as err:
+ jitems = ",".join([pprint_thing(item) for item in items])
+ raise ValueError(
+ f"cannot match existing table structure for [{jitems}] "
+ "on appending data"
+ ) from err
+ blocks = new_blocks
+ blk_items = new_blk_items
+
+ return blocks, blk_items
+
+ def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame:
+ """process axes filters"""
+ # make a copy to avoid side effects
+ if columns is not None:
+ columns = list(columns)
+
+ # make sure to include levels if we have them
+ if columns is not None and self.is_multi_index:
+ assert isinstance(self.levels, list) # assured by is_multi_index
+ for n in self.levels:
+ if n not in columns:
+ columns.insert(0, n)
+
+ # reorder by any non_index_axes & limit to the select columns
+ for axis, labels in self.non_index_axes:
+ obj = _reindex_axis(obj, axis, labels, columns)
+
+ def process_filter(field, filt, op):
+ for axis_name in obj._AXIS_ORDERS:
+ axis_number = obj._get_axis_number(axis_name)
+ axis_values = obj._get_axis(axis_name)
+ assert axis_number is not None
+
+ # see if the field is the name of an axis
+ if field == axis_name:
+ # if we have a multi-index, then need to include
+ # the levels
+ if self.is_multi_index:
+ filt = filt.union(Index(self.levels))
+
+ takers = op(axis_values, filt)
+ return obj.loc(axis=axis_number)[takers]
+
+            # this might be the name of a field IN an axis
+ elif field in axis_values:
+ # we need to filter on this dimension
+ values = ensure_index(getattr(obj, field).values)
+ filt = ensure_index(filt)
+
+ # hack until we support reversed dim flags
+ if isinstance(obj, DataFrame):
+ axis_number = 1 - axis_number
+
+ takers = op(values, filt)
+ return obj.loc(axis=axis_number)[takers]
+
+ raise ValueError(f"cannot find the field [{field}] for filtering!")
+
+ # apply the selection filters (but keep in the same order)
+ if selection.filter is not None:
+ for field, op, filt in selection.filter.format():
+ obj = process_filter(field, filt, op)
+
+ return obj
+
+ def create_description(
+ self,
+ complib,
+ complevel: int | None,
+ fletcher32: bool,
+ expectedrows: int | None,
+ ) -> dict[str, Any]:
+ """create the description of the table from the axes & values"""
+        # use the provided expectedrows if passed, otherwise a sensible default
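+        # (PyTables uses expectedrows to size the table's buffers/chunks efficiently)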
+ if expectedrows is None:
+ expectedrows = max(self.nrows_expected, 10000)
+
+ d = {"name": "table", "expectedrows": expectedrows}
+
+ # description from the axes & values
+ d["description"] = {a.cname: a.typ for a in self.axes}
+
+ if complib:
+ if complevel is None:
+ complevel = self._complevel or 9
+ filters = _tables().Filters(
+ complevel=complevel,
+ complib=complib,
+ fletcher32=fletcher32 or self._fletcher32,
+ )
+ d["filters"] = filters
+ elif self._filters is not None:
+ d["filters"] = self._filters
+
+ return d
+
+ def read_coordinates(
+ self, where=None, start: int | None = None, stop: int | None = None
+ ):
+ """
+ select coordinates (row numbers) from a table; return the
+ coordinates object
+ """
+ # validate the version
+ self.validate_version(where)
+
+ # infer the data kind
+ if not self.infer_axes():
+ return False
+
+ # create the selection
+ selection = Selection(self, where=where, start=start, stop=stop)
+ coords = selection.select_coords()
+ if selection.filter is not None:
+ for field, op, filt in selection.filter.format():
+ data = self.read_column(
+ field, start=coords.min(), stop=coords.max() + 1
+ )
+ coords = coords[op(data.iloc[coords - coords.min()], filt).values]
+
+ return Index(coords)
+
+ def read_column(
+ self,
+ column: str,
+ where=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ """
+ return a single column from the table, generally only indexables
+ are interesting
+ """
+ # validate the version
+ self.validate_version()
+
+ # infer the data kind
+ if not self.infer_axes():
+ return False
+
+ if where is not None:
+ raise TypeError("read_column does not currently accept a where clause")
+
+ # find the axes
+ for a in self.axes:
+ if column == a.name:
+ if not a.is_data_indexable:
+ raise ValueError(
+ f"column [{column}] can not be extracted individually; "
+ "it is not data indexable"
+ )
+
+ # column must be an indexable or a data column
+ c = getattr(self.table.cols, column)
+ a.set_info(self.info)
+ col_values = a.convert(
+ c[start:stop],
+ nan_rep=self.nan_rep,
+ encoding=self.encoding,
+ errors=self.errors,
+ )
+ return Series(_set_tz(col_values[1], a.tz), name=column, copy=False)
+
+ raise KeyError(f"column [{column}] not found in the table")
+
+
+class WORMTable(Table):
+ """
+ a write-once read-many table: this format DOES NOT ALLOW appending to a
+    table. writing is a one-time operation; the data are stored in a format
+ that allows for searching the data on disk
+ """
+
+ table_type = "worm"
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ """
+ read the indices and the indexing array, calculate offset rows and return
+ """
+ raise NotImplementedError("WORMTable needs to implement read")
+
+ def write(self, obj, **kwargs) -> None:
+ """
+ write in a format that we can search later on (but cannot append
+ to): write out the indices and the values using _write_array
+        (e.g. a CArray); create an indexing table so that we can search
+ """
+ raise NotImplementedError("WORMTable needs to implement write")
+
+
+class AppendableTable(Table):
+ """support the new appendable table formats"""
+
+ table_type = "appendable"
+
+ # error: Signature of "write" incompatible with supertype "Fixed"
+ def write( # type: ignore[override]
+ self,
+ obj,
+ axes=None,
+ append: bool = False,
+ complib=None,
+ complevel=None,
+ fletcher32=None,
+ min_itemsize=None,
+ chunksize: int | None = None,
+ expectedrows=None,
+ dropna: bool = False,
+ nan_rep=None,
+ data_columns=None,
+ track_times: bool = True,
+ ) -> None:
+ if not append and self.is_exists:
+ self._handle.remove_node(self.group, "table")
+
+ # create the axes
+ table = self._create_axes(
+ axes=axes,
+ obj=obj,
+ validate=append,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ data_columns=data_columns,
+ )
+
+ for a in table.axes:
+ a.validate_names()
+
+ if not table.is_exists:
+ # create the table
+ options = table.create_description(
+ complib=complib,
+ complevel=complevel,
+ fletcher32=fletcher32,
+ expectedrows=expectedrows,
+ )
+
+ # set the table attributes
+ table.set_attrs()
+
+ options["track_times"] = track_times
+
+ # create the table
+ table._handle.create_table(table.group, **options)
+
+ # update my info
+ table.attrs.info = table.info
+
+ # validate the axes and set the kinds
+ for a in table.axes:
+ a.validate_and_set(table, append)
+
+ # add the rows
+ table.write_data(chunksize, dropna=dropna)
+
+ def write_data(self, chunksize: int | None, dropna: bool = False) -> None:
+ """
+        we form the data into a 2-d array including indexes, values, and mask; write chunk-by-chunk
+ """
+ names = self.dtype.names
+ nrows = self.nrows_expected
+
+ # if dropna==True, then drop ALL nan rows
+ masks = []
+ if dropna:
+ for a in self.values_axes:
+ # figure the mask: only do if we can successfully process this
+ # column, otherwise ignore the mask
+ mask = isna(a.data).all(axis=0)
+ if isinstance(mask, np.ndarray):
+ masks.append(mask.astype("u1", copy=False))
+
+ # consolidate masks
+ if len(masks):
+ mask = masks[0]
+ for m in masks[1:]:
+ mask = mask & m
+ mask = mask.ravel()
+ else:
+ mask = None
+
+ # broadcast the indexes if needed
+ indexes = [a.cvalues for a in self.index_axes]
+ nindexes = len(indexes)
+        assert nindexes == 1, nindexes  # ensures we don't need to broadcast
+
+ # transpose the values so first dimension is last
+ # reshape the values if needed
+ values = [a.take_data() for a in self.values_axes]
+ values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
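+        # for the common 2-D case this is simply v.T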
+ bvalues = []
+ for i, v in enumerate(values):
+ new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
+ bvalues.append(v.reshape(new_shape))
+
+ # write the chunks
+ if chunksize is None:
+ chunksize = 100000
+
+ rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
+ chunks = nrows // chunksize + 1
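+        # one extra chunk covers any remainder; the start_i >= end_i check below
+        # skips the extra chunk when nrows divides evenly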
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, nrows)
+ if start_i >= end_i:
+ break
+
+ self.write_data_chunk(
+ rows,
+ indexes=[a[start_i:end_i] for a in indexes],
+ mask=mask[start_i:end_i] if mask is not None else None,
+ values=[v[start_i:end_i] for v in bvalues],
+ )
+
+ def write_data_chunk(
+ self,
+ rows: np.ndarray,
+ indexes: list[np.ndarray],
+ mask: npt.NDArray[np.bool_] | None,
+ values: list[np.ndarray],
+ ) -> None:
+ """
+ Parameters
+ ----------
+ rows : an empty memory space where we are putting the chunk
+ indexes : an array of the indexes
+ mask : an array of the masks
+ values : an array of the values
+ """
+ # 0 len
+ for v in values:
+ if not np.prod(v.shape):
+ return
+
+ nrows = indexes[0].shape[0]
+ if nrows != len(rows):
+ rows = np.empty(nrows, dtype=self.dtype)
+ names = self.dtype.names
+ nindexes = len(indexes)
+
+ # indexes
+ for i, idx in enumerate(indexes):
+ rows[names[i]] = idx
+
+ # values
+ for i, v in enumerate(values):
+ rows[names[i + nindexes]] = v
+
+ # mask
+ if mask is not None:
+ m = ~mask.ravel().astype(bool, copy=False)
+ if not m.all():
+ rows = rows[m]
+
+ if len(rows):
+ self.table.append(rows)
+ self.table.flush()
+
+ def delete(self, where=None, start: int | None = None, stop: int | None = None):
+ # delete all rows (and return the nrows)
+ if where is None or not len(where):
+ if start is None and stop is None:
+ nrows = self.nrows
+ self._handle.remove_node(self.group, recursive=True)
+ else:
+ # pytables<3.0 would remove a single row with stop=None
+ if stop is None:
+ stop = self.nrows
+ nrows = self.table.remove_rows(start=start, stop=stop)
+ self.table.flush()
+ return nrows
+
+ # infer the data kind
+ if not self.infer_axes():
+ return None
+
+ # create the selection
+ table = self.table
+ selection = Selection(self, where, start=start, stop=stop)
+ values = selection.select_coords()
+
+ # delete the rows in reverse order
+ sorted_series = Series(values, copy=False).sort_values()
+ ln = len(sorted_series)
+
+ if ln:
+ # construct groups of consecutive rows
+ diff = sorted_series.diff()
+ groups = list(diff[diff > 1].index)
+
+ # 1 group
+ if not len(groups):
+ groups = [0]
+
+ # final element
+ if groups[-1] != ln:
+ groups.append(ln)
+
+ # initial element
+ if groups[0] != 0:
+ groups.insert(0, 0)
+
+ # we must remove in reverse order!
+ pg = groups.pop()
+ for g in reversed(groups):
+ rows = sorted_series.take(range(g, pg))
+ table.remove_rows(
+ start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
+ )
+ pg = g
+
+ self.table.flush()
+
+ # return the number of rows removed
+ return ln
+
+
+class AppendableFrameTable(AppendableTable):
+ """support the new appendable table formats"""
+
+ pandas_kind = "frame_table"
+ table_type = "appendable_frame"
+ ndim = 2
+ obj_type: type[DataFrame | Series] = DataFrame
+
+ @property
+ def is_transposed(self) -> bool:
+ return self.index_axes[0].axis == 1
+
+ @classmethod
+ def get_object(cls, obj, transposed: bool):
+ """these are written transposed"""
+ if transposed:
+ obj = obj.T
+ return obj
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ # validate the version
+ self.validate_version(where)
+
+ # infer the data kind
+ if not self.infer_axes():
+ return None
+
+ result = self._read_axes(where=where, start=start, stop=stop)
+
+ info = (
+ self.info.get(self.non_index_axes[0][0], {})
+ if len(self.non_index_axes)
+ else {}
+ )
+
+ inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
+ assert len(inds) == 1
+ ind = inds[0]
+
+ index = result[ind][0]
+
+ frames = []
+ for i, a in enumerate(self.axes):
+ if a not in self.values_axes:
+ continue
+ index_vals, cvalues = result[i]
+
+ # we could have a multi-index constructor here
+            # ensure_index doesn't recognize our list-of-tuples here
+ if info.get("type") != "MultiIndex":
+ cols = Index(index_vals)
+ else:
+ cols = MultiIndex.from_tuples(index_vals)
+
+ names = info.get("names")
+ if names is not None:
+ cols.set_names(names, inplace=True)
+
+ if self.is_transposed:
+ values = cvalues
+ index_ = cols
+ cols_ = Index(index, name=getattr(index, "name", None))
+ else:
+ values = cvalues.T
+ index_ = Index(index, name=getattr(index, "name", None))
+ cols_ = cols
+
+ # if we have a DataIndexableCol, its shape will only be 1 dim
+ if values.ndim == 1 and isinstance(values, np.ndarray):
+ values = values.reshape((1, values.shape[0]))
+
+ if isinstance(values, np.ndarray):
+ df = DataFrame(values.T, columns=cols_, index=index_, copy=False)
+ elif isinstance(values, Index):
+ df = DataFrame(values, columns=cols_, index=index_)
+ else:
+ # Categorical
+ df = DataFrame._from_arrays([values], columns=cols_, index=index_)
+ if not (using_pyarrow_string_dtype() and values.dtype.kind == "O"):
+ assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
+ if using_pyarrow_string_dtype() and is_string_array(
+ values, # type: ignore[arg-type]
+ skipna=True,
+ ):
+ df = df.astype("string[pyarrow_numpy]")
+ frames.append(df)
+
+ if len(frames) == 1:
+ df = frames[0]
+ else:
+ df = concat(frames, axis=1)
+
+ selection = Selection(self, where=where, start=start, stop=stop)
+ # apply the selection filters & axis orderings
+ df = self.process_axes(df, selection=selection, columns=columns)
+ return df
+
+
+class AppendableSeriesTable(AppendableFrameTable):
+ """support the new appendable table formats"""
+
+ pandas_kind = "series_table"
+ table_type = "appendable_series"
+ ndim = 2
+ obj_type = Series
+
+ @property
+ def is_transposed(self) -> bool:
+ return False
+
+ @classmethod
+ def get_object(cls, obj, transposed: bool):
+ return obj
+
+ # error: Signature of "write" incompatible with supertype "Fixed"
+ def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override]
+ """we are going to write this as a frame table"""
+ if not isinstance(obj, DataFrame):
+ name = obj.name or "values"
+ obj = obj.to_frame(name)
+ super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ) -> Series:
+ is_multi_index = self.is_multi_index
+ if columns is not None and is_multi_index:
+ assert isinstance(self.levels, list) # needed for mypy
+ for n in self.levels:
+ if n not in columns:
+ columns.insert(0, n)
+ s = super().read(where=where, columns=columns, start=start, stop=stop)
+ if is_multi_index:
+ s.set_index(self.levels, inplace=True)
+
+ s = s.iloc[:, 0]
+
+ # remove the default name
+ if s.name == "values":
+ s.name = None
+ return s
+
+
+class AppendableMultiSeriesTable(AppendableSeriesTable):
+ """support the new appendable table formats"""
+
+ pandas_kind = "series_table"
+ table_type = "appendable_multiseries"
+
+ # error: Signature of "write" incompatible with supertype "Fixed"
+ def write(self, obj, **kwargs) -> None: # type: ignore[override]
+ """we are going to write this as a frame table"""
+ name = obj.name or "values"
+ newobj, self.levels = self.validate_multiindex(obj)
+ assert isinstance(self.levels, list) # for mypy
+ cols = list(self.levels)
+ cols.append(name)
+ newobj.columns = Index(cols)
+ super().write(obj=newobj, **kwargs)
+
+
+class GenericTable(AppendableFrameTable):
+ """a table that read/writes the generic pytables table format"""
+
+ pandas_kind = "frame_table"
+ table_type = "generic_table"
+ ndim = 2
+ obj_type = DataFrame
+ levels: list[Hashable]
+
+ @property
+ def pandas_type(self) -> str:
+ return self.pandas_kind
+
+ @property
+ def storable(self):
+ return getattr(self.group, "table", None) or self.group
+
+ def get_attrs(self) -> None:
+ """retrieve our attributes"""
+ self.non_index_axes = []
+ self.nan_rep = None
+ self.levels = []
+
+ self.index_axes = [a for a in self.indexables if a.is_an_indexable]
+ self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
+ self.data_columns = [a.name for a in self.values_axes]
+
+ @cache_readonly
+ def indexables(self):
+ """create the indexables from the table description"""
+ d = self.description
+
+ # TODO: can we get a typ for this? AFAICT it is the only place
+ # where we aren't passing one
+        # the index column is just a simple index
+ md = self.read_metadata("index")
+ meta = "category" if md is not None else None
+ index_col = GenericIndexCol(
+ name="index", axis=0, table=self.table, meta=meta, metadata=md
+ )
+
+ _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]
+
+ for i, n in enumerate(d._v_names):
+ assert isinstance(n, str)
+
+ atom = getattr(d, n)
+ md = self.read_metadata(n)
+ meta = "category" if md is not None else None
+ dc = GenericDataIndexableCol(
+ name=n,
+ pos=i,
+ values=[n],
+ typ=atom,
+ table=self.table,
+ meta=meta,
+ metadata=md,
+ )
+ _indexables.append(dc)
+
+ return _indexables
+
+ # error: Signature of "write" incompatible with supertype "AppendableTable"
+ def write(self, **kwargs) -> None: # type: ignore[override]
+ raise NotImplementedError("cannot write on an generic table")
+
+
+class AppendableMultiFrameTable(AppendableFrameTable):
+ """a frame with a multi-index"""
+
+ table_type = "appendable_multiframe"
+ obj_type = DataFrame
+ ndim = 2
+ _re_levels = re.compile(r"^level_\d+$")
+
+ @property
+ def table_type_short(self) -> str:
+ return "appendable_multi"
+
+ # error: Signature of "write" incompatible with supertype "Fixed"
+ def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override]
+ if data_columns is None:
+ data_columns = []
+ elif data_columns is True:
+ data_columns = obj.columns.tolist()
+ obj, self.levels = self.validate_multiindex(obj)
+ assert isinstance(self.levels, list) # for mypy
+ for n in self.levels:
+ if n not in data_columns:
+ data_columns.insert(0, n)
+ super().write(obj=obj, data_columns=data_columns, **kwargs)
+
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ):
+ df = super().read(where=where, columns=columns, start=start, stop=stop)
+ df = df.set_index(self.levels)
+
+ # remove names for 'level_%d'
+ df.index = df.index.set_names(
+ [None if self._re_levels.search(name) else name for name in df.index.names]
+ )
+
+ return df
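+
+
+# Rough mapping from the public HDFStore API to the appendable table classes
+# above (illustrative sketch; the file name and keys are hypothetical):
+# >>> df = DataFrame({"a": range(3)})
+# >>> with HDFStore("store.h5") as store:
+# ...     store.put("frame", df, format="table")        # -> AppendableFrameTable
+# ...     store.put("series", df["a"], format="table")  # -> AppendableSeriesTable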
+
+
+def _reindex_axis(
+ obj: DataFrame, axis: AxisInt, labels: Index, other=None
+) -> DataFrame:
+ ax = obj._get_axis(axis)
+ labels = ensure_index(labels)
+
+ # try not to reindex even if other is provided
+ # if it equals our current index
+ if other is not None:
+ other = ensure_index(other)
+ if (other is None or labels.equals(other)) and labels.equals(ax):
+ return obj
+
+ labels = ensure_index(labels.unique())
+ if other is not None:
+ labels = ensure_index(other.unique()).intersection(labels, sort=False)
+ if not labels.equals(ax):
+ slicer: list[slice | Index] = [slice(None, None)] * obj.ndim
+ slicer[axis] = labels
+ obj = obj.loc[tuple(slicer)]
+ return obj
+
+
+# tz to/from coercion
+
+
+def _get_tz(tz: tzinfo) -> str | tzinfo:
+ """for a tz-aware type, return an encoded zone"""
+ zone = timezones.get_timezone(tz)
+ return zone
+
+
+@overload
+def _set_tz(
+ values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False
+) -> DatetimeIndex:
+ ...
+
+
+@overload
+def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray:
+ ...
+
+
+def _set_tz(
+ values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False
+) -> np.ndarray | DatetimeIndex:
+ """
+ coerce the values to a DatetimeIndex if tz is set
+ preserve the input shape if possible
+
+ Parameters
+ ----------
+ values : ndarray or Index
+ tz : str or tzinfo
+ coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
+ """
+ if isinstance(values, DatetimeIndex):
+ # If values is tzaware, the tz gets dropped in the values.ravel()
+ # call below (which returns an ndarray). So we are only non-lossy
+ # if `tz` matches `values.tz`.
+ assert values.tz is None or values.tz == tz
+ if values.tz is not None:
+ return values
+
+ if tz is not None:
+ if isinstance(values, DatetimeIndex):
+ name = values.name
+ else:
+ name = None
+ values = values.ravel()
+
+ tz = _ensure_decoded(tz)
+ values = DatetimeIndex(values, name=name)
+ values = values.tz_localize("UTC").tz_convert(tz)
+ elif coerce:
+ values = np.asarray(values, dtype="M8[ns]")
+
+ # error: Incompatible return value type (got "Union[ndarray, Index]",
+ # expected "Union[ndarray, DatetimeIndex]")
+ return values # type: ignore[return-value]
+
+
+def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
+ assert isinstance(name, str)
+
+ index_name = index.name
+ # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index";
+ # expected "Union[ExtensionArray, ndarray]"
+ converted, dtype_name = _get_data_and_dtype_name(index) # type: ignore[arg-type]
+ kind = _dtype_to_kind(dtype_name)
+ atom = DataIndexableCol._get_atom(converted)
+
+ if (
+ lib.is_np_dtype(index.dtype, "iu")
+ or needs_i8_conversion(index.dtype)
+ or is_bool_dtype(index.dtype)
+ ):
+ # Includes Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
+ # in which case "kind" is "integer", "integer", "datetime64",
+ # "timedelta64", and "integer", respectively.
+ return IndexCol(
+ name,
+ values=converted,
+ kind=kind,
+ typ=atom,
+ freq=getattr(index, "freq", None),
+ tz=getattr(index, "tz", None),
+ index_name=index_name,
+ )
+
+ if isinstance(index, MultiIndex):
+ raise TypeError("MultiIndex not supported here!")
+
+ inferred_type = lib.infer_dtype(index, skipna=False)
+ # we won't get inferred_type of "datetime64" or "timedelta64" as these
+ # would go through the DatetimeIndex/TimedeltaIndex paths above
+
+ values = np.asarray(index)
+
+ if inferred_type == "date":
+ converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
+ return IndexCol(
+ name, converted, "date", _tables().Time32Col(), index_name=index_name
+ )
+ elif inferred_type == "string":
+ converted = _convert_string_array(values, encoding, errors)
+ itemsize = converted.dtype.itemsize
+ return IndexCol(
+ name,
+ converted,
+ "string",
+ _tables().StringCol(itemsize),
+ index_name=index_name,
+ )
+
+ elif inferred_type in ["integer", "floating"]:
+ return IndexCol(
+ name, values=converted, kind=kind, typ=atom, index_name=index_name
+ )
+ else:
+ assert isinstance(converted, np.ndarray) and converted.dtype == object
+ assert kind == "object", kind
+ atom = _tables().ObjectAtom()
+ return IndexCol(name, converted, kind, atom, index_name=index_name)
+
+
+def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index:
+ index: Index | np.ndarray
+
+ if kind.startswith("datetime64"):
+ if kind == "datetime64":
+ # created before we stored resolution information
+ index = DatetimeIndex(data)
+ else:
+ index = DatetimeIndex(data.view(kind))
+ elif kind == "timedelta64":
+ index = TimedeltaIndex(data)
+ elif kind == "date":
+ try:
+ index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
+ except ValueError:
+ index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
+ elif kind in ("integer", "float", "bool"):
+ index = np.asarray(data)
+ elif kind in ("string"):
+ index = _unconvert_string_array(
+ data, nan_rep=None, encoding=encoding, errors=errors
+ )
+ elif kind == "object":
+ index = np.asarray(data[0])
+ else: # pragma: no cover
+ raise ValueError(f"unrecognized index type {kind}")
+ return index
+
+
+def _maybe_convert_for_string_atom(
+ name: str,
+ bvalues: ArrayLike,
+ existing_col,
+ min_itemsize,
+ nan_rep,
+ encoding,
+ errors,
+ columns: list[str],
+):
+ if bvalues.dtype != object:
+ return bvalues
+
+ bvalues = cast(np.ndarray, bvalues)
+
+ dtype_name = bvalues.dtype.name
+ inferred_type = lib.infer_dtype(bvalues, skipna=False)
+
+ if inferred_type == "date":
+ raise TypeError("[date] is not implemented as a table column")
+ if inferred_type == "datetime":
+ # after GH#8260
+ # this only would be hit for a multi-timezone dtype which is an error
+ raise TypeError(
+ "too many timezones in this block, create separate data columns"
+ )
+
+ if not (inferred_type == "string" or dtype_name == "object"):
+ return bvalues
+
+ mask = isna(bvalues)
+ data = bvalues.copy()
+ data[mask] = nan_rep
+
+ # see if we have a valid string type
+ inferred_type = lib.infer_dtype(data, skipna=False)
+ if inferred_type != "string":
+ # we cannot serialize this data, so report an exception on a column
+ # by column basis
+
+ # expected behaviour:
+ # search block for a non-string object column by column
+ for i in range(data.shape[0]):
+ col = data[i]
+ inferred_type = lib.infer_dtype(col, skipna=False)
+ if inferred_type != "string":
+ error_column_label = columns[i] if len(columns) > i else f"No.{i}"
+ raise TypeError(
+ f"Cannot serialize the column [{error_column_label}]\n"
+ f"because its data contents are not [string] but "
+ f"[{inferred_type}] object dtype"
+ )
+
+ # itemsize is the maximum length of a string (along any dimension)
+
+ data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
+ itemsize = data_converted.itemsize
+
+ # specified min_itemsize?
+ if isinstance(min_itemsize, dict):
+ min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
+ itemsize = max(min_itemsize or 0, itemsize)
+
+ # check for column in the values conflicts
+ if existing_col is not None:
+ eci = existing_col.validate_col(itemsize)
+ if eci is not None and eci > itemsize:
+ itemsize = eci
+
+ data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
+ return data_converted
+
+
+def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
+ """
+ Take a string-like that is object dtype and coerce to a fixed size string type.
+
+ Parameters
+ ----------
+ data : np.ndarray[object]
+ encoding : str
+ errors : str
+ Handler for encoding errors.
+
+ Returns
+ -------
+ np.ndarray[fixed-length-string]
+ """
+ # encode if needed
+ if len(data):
+ data = (
+ Series(data.ravel(), copy=False)
+ .str.encode(encoding, errors)
+ ._values.reshape(data.shape)
+ )
+
+ # create the sized dtype
+ ensured = ensure_object(data.ravel())
+ itemsize = max(1, libwriters.max_len_string_array(ensured))
+
+ data = np.asarray(data, dtype=f"S{itemsize}")
+ return data
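+
+
+# Doctest-style sketch (illustrative only): the resulting fixed-width dtype is
+# sized to the longest encoded string.
+# >>> _convert_string_array(np.array(["ab", "c"], dtype=object), "UTF-8", "strict")
+# array([b'ab', b'c'], dtype='|S2')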
+
+
+def _unconvert_string_array(
+ data: np.ndarray, nan_rep, encoding: str, errors: str
+) -> np.ndarray:
+ """
+ Inverse of _convert_string_array.
+
+ Parameters
+ ----------
+ data : np.ndarray[fixed-length-string]
+ nan_rep : the storage repr of NaN
+ encoding : str
+ errors : str
+ Handler for encoding errors.
+
+ Returns
+ -------
+ np.ndarray[object]
+ Decoded data.
+ """
+ shape = data.shape
+ data = np.asarray(data.ravel(), dtype=object)
+
+ if len(data):
+ itemsize = libwriters.max_len_string_array(ensure_object(data))
+ dtype = f"U{itemsize}"
+
+ if isinstance(data[0], bytes):
+ data = Series(data, copy=False).str.decode(encoding, errors=errors)._values
+ else:
+ data = data.astype(dtype, copy=False).astype(object, copy=False)
+
+ if nan_rep is None:
+ nan_rep = "nan"
+
+ libwriters.string_array_replace_from_nan_rep(data, nan_rep)
+ return data.reshape(shape)
+
+
+def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
+ assert isinstance(val_kind, str), type(val_kind)
+ if _need_convert(val_kind):
+ conv = _get_converter(val_kind, encoding, errors)
+ values = conv(values)
+ return values
+
+
+def _get_converter(kind: str, encoding: str, errors: str):
+ if kind == "datetime64":
+ return lambda x: np.asarray(x, dtype="M8[ns]")
+ elif "datetime64" in kind:
+ return lambda x: np.asarray(x, dtype=kind)
+ elif kind == "string":
+ return lambda x: _unconvert_string_array(
+ x, nan_rep=None, encoding=encoding, errors=errors
+ )
+ else: # pragma: no cover
+ raise ValueError(f"invalid kind {kind}")
+
+
+def _need_convert(kind: str) -> bool:
+ if kind in ("datetime64", "string") or "datetime64" in kind:
+ return True
+ return False
+
+
+def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
+ """
+    Prior to 0.10.1, we named values blocks like ``values_block_0`` and the
+    name ``values_0``; adjust the given name if necessary.
+
+ Parameters
+ ----------
+ name : str
+ version : Tuple[int, int, int]
+
+ Returns
+ -------
+ str
+ """
+ if isinstance(version, str) or len(version) < 3:
+ raise ValueError("Version is incorrect, expected sequence of 3 integers.")
+
+ if version[0] == 0 and version[1] <= 10 and version[2] == 0:
+ m = re.search(r"values_block_(\d+)", name)
+ if m:
+ grp = m.groups()[0]
+ name = f"values_{grp}"
+ return name
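+
+
+# Illustrative examples of the renaming rule (doctest-style sketch):
+# >>> _maybe_adjust_name("values_block_0", (0, 10, 0))
+# 'values_0'
+# >>> _maybe_adjust_name("values_block_0", (0, 11, 0))
+# 'values_block_0'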
+
+
+def _dtype_to_kind(dtype_str: str) -> str:
+ """
+ Find the "kind" string describing the given dtype name.
+ """
+ dtype_str = _ensure_decoded(dtype_str)
+
+ if dtype_str.startswith(("string", "bytes")):
+ kind = "string"
+ elif dtype_str.startswith("float"):
+ kind = "float"
+ elif dtype_str.startswith("complex"):
+ kind = "complex"
+ elif dtype_str.startswith(("int", "uint")):
+ kind = "integer"
+ elif dtype_str.startswith("datetime64"):
+ kind = dtype_str
+ elif dtype_str.startswith("timedelta"):
+ kind = "timedelta64"
+ elif dtype_str.startswith("bool"):
+ kind = "bool"
+ elif dtype_str.startswith("category"):
+ kind = "category"
+ elif dtype_str.startswith("period"):
+ # We store the `freq` attr so we can restore from integers
+ kind = "integer"
+ elif dtype_str == "object":
+ kind = "object"
+ else:
+ raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
+
+ return kind
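+
+
+# A few representative mappings (illustrative doctest-style sketch):
+# >>> _dtype_to_kind("int64")
+# 'integer'
+# >>> _dtype_to_kind("datetime64[ns]")
+# 'datetime64[ns]'
+# >>> _dtype_to_kind("bool")
+# 'bool'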
+
+
+def _get_data_and_dtype_name(data: ArrayLike):
+ """
+ Convert the passed data into a storable form and a dtype string.
+ """
+ if isinstance(data, Categorical):
+ data = data.codes
+
+ if isinstance(data.dtype, DatetimeTZDtype):
+ # For datetime64tz we need to drop the TZ in tests TODO: why?
+ dtype_name = f"datetime64[{data.dtype.unit}]"
+ else:
+ dtype_name = data.dtype.name
+
+ if data.dtype.kind in "mM":
+ data = np.asarray(data.view("i8"))
+ # TODO: we used to reshape for the dt64tz case, but no longer
+ # doing that doesn't seem to break anything. why?
+
+ elif isinstance(data, PeriodIndex):
+ data = data.asi8
+
+ data = np.asarray(data)
+ return data, dtype_name
+
+
+class Selection:
+ """
+ Carries out a selection operation on a tables.Table object.
+
+ Parameters
+ ----------
+ table : a Table object
+ where : list of Terms (or convertible to)
+ start, stop: indices to start and/or stop selection
+
+ """
+
+ def __init__(
+ self,
+ table: Table,
+ where=None,
+ start: int | None = None,
+ stop: int | None = None,
+ ) -> None:
+ self.table = table
+ self.where = where
+ self.start = start
+ self.stop = stop
+ self.condition = None
+ self.filter = None
+ self.terms = None
+ self.coordinates = None
+
+ if is_list_like(where):
+ # see if we have a passed coordinate like
+ with suppress(ValueError):
+ inferred = lib.infer_dtype(where, skipna=False)
+ if inferred in ("integer", "boolean"):
+ where = np.asarray(where)
+ if where.dtype == np.bool_:
+ start, stop = self.start, self.stop
+ if start is None:
+ start = 0
+ if stop is None:
+ stop = self.table.nrows
+ self.coordinates = np.arange(start, stop)[where]
+ elif issubclass(where.dtype.type, np.integer):
+ if (self.start is not None and (where < self.start).any()) or (
+ self.stop is not None and (where >= self.stop).any()
+ ):
+ raise ValueError(
+ "where must have index locations >= start and < stop"
+ )
+ self.coordinates = where
+
+ if self.coordinates is None:
+ self.terms = self.generate(where)
+
+ # create the numexpr & the filter
+ if self.terms is not None:
+ self.condition, self.filter = self.terms.evaluate()
+
+ def generate(self, where):
+ """where can be a : dict,list,tuple,string"""
+ if where is None:
+ return None
+
+ q = self.table.queryables()
+ try:
+ return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
+ except NameError as err:
+ # raise a nice message, suggesting that the user should use
+ # data_columns
+ qkeys = ",".join(q.keys())
+ msg = dedent(
+ f"""\
+ The passed where expression: {where}
+ contains an invalid variable reference
+ all of the variable references must be a reference to
+ an axis (e.g. 'index' or 'columns'), or a data_column
+ The currently defined references are: {qkeys}
+ """
+ )
+ raise ValueError(msg) from err
+
+ def select(self):
+ """
+ generate the selection
+ """
+ if self.condition is not None:
+ return self.table.table.read_where(
+ self.condition.format(), start=self.start, stop=self.stop
+ )
+ elif self.coordinates is not None:
+ return self.table.table.read_coordinates(self.coordinates)
+ return self.table.table.read(start=self.start, stop=self.stop)
+
+ def select_coords(self):
+ """
+ generate the selection
+ """
+ start, stop = self.start, self.stop
+ nrows = self.table.nrows
+ if start is None:
+ start = 0
+ elif start < 0:
+ start += nrows
+ if stop is None:
+ stop = nrows
+ elif stop < 0:
+ stop += nrows
+
+ if self.condition is not None:
+ return self.table.table.get_where_list(
+ self.condition.format(), start=start, stop=stop, sort=True
+ )
+ elif self.coordinates is not None:
+ return self.coordinates
+
+ return np.arange(start, stop)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..317730745b6e3a0278a48b7bb810cf43e718e787
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/__init__.py
@@ -0,0 +1,3 @@
+from pandas.io.sas.sasreader import read_sas
+
+__all__ = ["read_sas"]
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5bdfb554178816f9d668157f87514776f277eb9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
@@ -0,0 +1,756 @@
+"""
+Read SAS7BDAT files
+
+Based on code written by Jared Hobbs:
+ https://bitbucket.org/jaredhobbs/sas7bdat
+
+See also:
+ https://github.com/BioStatMatt/sas7bdat
+
+Partial documentation of the file format:
+ https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
+
+Reference for binary data compression:
+ http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import (
+ datetime,
+ timedelta,
+)
+import sys
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas._libs.byteswap import (
+ read_double_with_byteswap,
+ read_float_with_byteswap,
+ read_uint16_with_byteswap,
+ read_uint32_with_byteswap,
+ read_uint64_with_byteswap,
+)
+from pandas._libs.sas import (
+ Parser,
+ get_subheader_index,
+)
+from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
+from pandas.errors import EmptyDataError
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Timestamp,
+ isna,
+)
+
+from pandas.io.common import get_handle
+import pandas.io.sas.sas_constants as const
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ )
+
+
+_unix_origin = Timestamp("1970-01-01")
+_sas_origin = Timestamp("1960-01-01")
+
+
+def _parse_datetime(sas_datetime: float, unit: str):
+ if isna(sas_datetime):
+ return pd.NaT
+
+ if unit == "s":
+ return datetime(1960, 1, 1) + timedelta(seconds=sas_datetime)
+
+ elif unit == "d":
+ return datetime(1960, 1, 1) + timedelta(days=sas_datetime)
+
+ else:
+ raise ValueError("unit must be 'd' or 's'")
+
+
+def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
+ """
+ Convert to Timestamp if possible, otherwise to datetime.datetime.
+    SAS float64 lacks precision beyond millisecond resolution, so the fit
+    to datetime.datetime is acceptable.
+
+ Parameters
+ ----------
+ sas_datetimes : {Series, Sequence[float]}
+ Dates or datetimes in SAS
+ unit : {'d', 's'}
+ "d" if the floats represent dates, "s" for datetimes
+
+ Returns
+ -------
+ Series
+ Series of datetime64 dtype or datetime.datetime.
+ """
+ td = (_sas_origin - _unix_origin).as_unit("s")
+ if unit == "s":
+ millis = cast_from_unit_vectorized(
+ sas_datetimes._values, unit="s", out_unit="ms"
+ )
+ dt64ms = millis.view("M8[ms]") + td
+ return pd.Series(dt64ms, index=sas_datetimes.index, copy=False)
+ else:
+ vals = np.array(sas_datetimes, dtype="M8[D]") + td
+ return pd.Series(vals, dtype="M8[s]", index=sas_datetimes.index, copy=False)
+
+
+class _Column:
+ col_id: int
+ name: str | bytes
+ label: str | bytes
+ format: str | bytes
+ ctype: bytes
+ length: int
+
+ def __init__(
+ self,
+ col_id: int,
+ # These can be bytes when convert_header_text is False
+ name: str | bytes,
+ label: str | bytes,
+ format: str | bytes,
+ ctype: bytes,
+ length: int,
+ ) -> None:
+ self.col_id = col_id
+ self.name = name
+ self.label = label
+ self.format = format
+ self.ctype = ctype
+ self.length = length
+
+
+# SAS7BDAT represents a SAS data file in SAS7BDAT format.
+class SAS7BDATReader(ReaderBase, abc.Iterator):
+ """
+ Read SAS files in SAS7BDAT format.
+
+ Parameters
+ ----------
+ path_or_buf : path name or buffer
+ Name of SAS file or file-like object pointing to SAS file
+ contents.
+ index : column identifier, defaults to None
+ Column to use as index.
+ convert_dates : bool, defaults to True
+ Attempt to convert dates to Pandas datetime values. Note that
+ some rarely used SAS date formats may be unsupported.
+ blank_missing : bool, defaults to True
+ Convert empty strings to missing values (SAS uses blanks to
+ indicate missing character variables).
+    chunksize : int, defaults to None
+        Return a SAS7BDATReader object for iteration; each iteration yields
+        a chunk with the given number of rows.
+    encoding : str, 'infer', defaults to None
+        String encoding according to the Python standard encodings.
+        encoding='infer' tries to detect the encoding from the file header;
+        encoding=None leaves the data in binary format.
+    convert_text : bool, defaults to True
+        If False, text variables are left as raw bytes.
+    convert_header_text : bool, defaults to True
+        If False, header text, including column names, is left as raw
+        bytes.
+ """
+
+ _int_length: int
+ _cached_page: bytes | None
+
+ def __init__(
+ self,
+ path_or_buf: FilePath | ReadBuffer[bytes],
+ index=None,
+ convert_dates: bool = True,
+ blank_missing: bool = True,
+ chunksize: int | None = None,
+ encoding: str | None = None,
+ convert_text: bool = True,
+ convert_header_text: bool = True,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self.index = index
+ self.convert_dates = convert_dates
+ self.blank_missing = blank_missing
+ self.chunksize = chunksize
+ self.encoding = encoding
+ self.convert_text = convert_text
+ self.convert_header_text = convert_header_text
+
+ self.default_encoding = "latin-1"
+ self.compression = b""
+ self.column_names_raw: list[bytes] = []
+ self.column_names: list[str | bytes] = []
+ self.column_formats: list[str | bytes] = []
+ self.columns: list[_Column] = []
+
+ self._current_page_data_subheader_pointers: list[tuple[int, int]] = []
+ self._cached_page = None
+ self._column_data_lengths: list[int] = []
+ self._column_data_offsets: list[int] = []
+ self._column_types: list[bytes] = []
+
+ self._current_row_in_file_index = 0
+ self._current_row_on_page_index = 0
+ self._current_row_in_file_index = 0
+
+ self.handles = get_handle(
+ path_or_buf, "rb", is_text=False, compression=compression
+ )
+
+ self._path_or_buf = self.handles.handle
+
+ # Same order as const.SASIndex
+ self._subheader_processors = [
+ self._process_rowsize_subheader,
+ self._process_columnsize_subheader,
+ self._process_subheader_counts,
+ self._process_columntext_subheader,
+ self._process_columnname_subheader,
+ self._process_columnattributes_subheader,
+ self._process_format_subheader,
+ self._process_columnlist_subheader,
+ None, # Data
+ ]
+
+ try:
+ self._get_properties()
+ self._parse_metadata()
+ except Exception:
+ self.close()
+ raise
+
+ def column_data_lengths(self) -> np.ndarray:
+ """Return a numpy int64 array of the column data lengths"""
+ return np.asarray(self._column_data_lengths, dtype=np.int64)
+
+ def column_data_offsets(self) -> np.ndarray:
+ """Return a numpy int64 array of the column offsets"""
+ return np.asarray(self._column_data_offsets, dtype=np.int64)
+
+ def column_types(self) -> np.ndarray:
+ """
+ Returns a numpy character array of the column types:
+ s (string) or d (double)
+ """
+ return np.asarray(self._column_types, dtype=np.dtype("S1"))
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_properties(self) -> None:
+ # Check magic number
+ self._path_or_buf.seek(0)
+ self._cached_page = self._path_or_buf.read(288)
+ if self._cached_page[0 : len(const.magic)] != const.magic:
+ raise ValueError("magic number mismatch (not a SAS file?)")
+
+ # Get alignment information
+ buf = self._read_bytes(const.align_1_offset, const.align_1_length)
+ if buf == const.u64_byte_checker_value:
+ self.U64 = True
+ self._int_length = 8
+ self._page_bit_offset = const.page_bit_offset_x64
+ self._subheader_pointer_length = const.subheader_pointer_length_x64
+ else:
+ self.U64 = False
+ self._page_bit_offset = const.page_bit_offset_x86
+ self._subheader_pointer_length = const.subheader_pointer_length_x86
+ self._int_length = 4
+ buf = self._read_bytes(const.align_2_offset, const.align_2_length)
+ if buf == const.align_1_checker_value:
+ align1 = const.align_2_value
+ else:
+ align1 = 0
+
+ # Get endianness information
+ buf = self._read_bytes(const.endianness_offset, const.endianness_length)
+ if buf == b"\x01":
+ self.byte_order = "<"
+ self.need_byteswap = sys.byteorder == "big"
+ else:
+ self.byte_order = ">"
+ self.need_byteswap = sys.byteorder == "little"
+
+ # Get encoding information
+ buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
+ if buf in const.encoding_names:
+ self.inferred_encoding = const.encoding_names[buf]
+ if self.encoding == "infer":
+ self.encoding = self.inferred_encoding
+ else:
+ self.inferred_encoding = f"unknown (code={buf})"
+
+ # Timestamp is epoch 01/01/1960
+ epoch = datetime(1960, 1, 1)
+ x = self._read_float(
+ const.date_created_offset + align1, const.date_created_length
+ )
+ self.date_created = epoch + pd.to_timedelta(x, unit="s")
+ x = self._read_float(
+ const.date_modified_offset + align1, const.date_modified_length
+ )
+ self.date_modified = epoch + pd.to_timedelta(x, unit="s")
+
+ self.header_length = self._read_uint(
+ const.header_size_offset + align1, const.header_size_length
+ )
+
+ # Read the rest of the header into cached_page.
+ buf = self._path_or_buf.read(self.header_length - 288)
+ self._cached_page += buf
+ # error: Argument 1 to "len" has incompatible type "Optional[bytes]";
+ # expected "Sized"
+ if len(self._cached_page) != self.header_length: # type: ignore[arg-type]
+ raise ValueError("The SAS7BDAT file appears to be truncated.")
+
+ self._page_length = self._read_uint(
+ const.page_size_offset + align1, const.page_size_length
+ )
+
+ def __next__(self) -> DataFrame:
+ da = self.read(nrows=self.chunksize or 1)
+ if da.empty:
+ self.close()
+ raise StopIteration
+ return da
+
+ # Read a single float of the given width (4 or 8).
+ def _read_float(self, offset: int, width: int):
+ assert self._cached_page is not None
+ if width == 4:
+ return read_float_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_double_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid float width")
+
+ # Read a single unsigned integer of the given width (1, 2, 4 or 8).
+ def _read_uint(self, offset: int, width: int) -> int:
+ assert self._cached_page is not None
+ if width == 1:
+ return self._read_bytes(offset, 1)[0]
+ elif width == 2:
+ return read_uint16_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 4:
+ return read_uint32_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_uint64_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid int width")
+
+ def _read_bytes(self, offset: int, length: int):
+ assert self._cached_page is not None
+ if offset + length > len(self._cached_page):
+ self.close()
+ raise ValueError("The cached page is too small.")
+ return self._cached_page[offset : offset + length]
+
+ def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes:
+ return self._convert_header_text(
+ self._read_bytes(offset, length).rstrip(b"\x00 ")
+ )
+
+ def _parse_metadata(self) -> None:
+ done = False
+ while not done:
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ break
+ if len(self._cached_page) != self._page_length:
+ raise ValueError("Failed to read a meta data page from the SAS file.")
+ done = self._process_page_meta()
+
+ def _process_page_meta(self) -> bool:
+ self._read_page_header()
+ pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type]
+ if self._current_page_type in pt:
+ self._process_page_metadata()
+ is_data_page = self._current_page_type == const.page_data_type
+ is_mix_page = self._current_page_type == const.page_mix_type
+ return bool(
+ is_data_page
+ or is_mix_page
+ or self._current_page_data_subheader_pointers != []
+ )
+
+ def _read_page_header(self) -> None:
+ bit_offset = self._page_bit_offset
+ tx = const.page_type_offset + bit_offset
+ self._current_page_type = (
+ self._read_uint(tx, const.page_type_length) & const.page_type_mask2
+ )
+ tx = const.block_count_offset + bit_offset
+ self._current_page_block_count = self._read_uint(tx, const.block_count_length)
+ tx = const.subheader_count_offset + bit_offset
+ self._current_page_subheaders_count = self._read_uint(
+ tx, const.subheader_count_length
+ )
+
+ def _process_page_metadata(self) -> None:
+ bit_offset = self._page_bit_offset
+
+ for i in range(self._current_page_subheaders_count):
+ offset = const.subheader_pointers_offset + bit_offset
+ total_offset = offset + self._subheader_pointer_length * i
+
+ subheader_offset = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_length = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_compression = self._read_uint(total_offset, 1)
+ total_offset += 1
+
+ subheader_type = self._read_uint(total_offset, 1)
+
+ if (
+ subheader_length == 0
+ or subheader_compression == const.truncated_subheader_id
+ ):
+ continue
+
+ subheader_signature = self._read_bytes(subheader_offset, self._int_length)
+ subheader_index = get_subheader_index(subheader_signature)
+ subheader_processor = self._subheader_processors[subheader_index]
+
+ if subheader_processor is None:
+ f1 = subheader_compression in (const.compressed_subheader_id, 0)
+ f2 = subheader_type == const.compressed_subheader_type
+ if self.compression and f1 and f2:
+ self._current_page_data_subheader_pointers.append(
+ (subheader_offset, subheader_length)
+ )
+ else:
+ self.close()
+ raise ValueError(
+ f"Unknown subheader signature {subheader_signature}"
+ )
+ else:
+ subheader_processor(subheader_offset, subheader_length)
+
+ def _process_rowsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ lcs_offset = offset
+ lcp_offset = offset
+ if self.U64:
+ lcs_offset += 682
+ lcp_offset += 706
+ else:
+ lcs_offset += 354
+ lcp_offset += 378
+
+ self.row_length = self._read_uint(
+ offset + const.row_length_offset_multiplier * int_len,
+ int_len,
+ )
+ self.row_count = self._read_uint(
+ offset + const.row_count_offset_multiplier * int_len,
+ int_len,
+ )
+ self.col_count_p1 = self._read_uint(
+ offset + const.col_count_p1_multiplier * int_len, int_len
+ )
+ self.col_count_p2 = self._read_uint(
+ offset + const.col_count_p2_multiplier * int_len, int_len
+ )
+ mx = const.row_count_on_mix_page_offset_multiplier * int_len
+ self._mix_page_row_count = self._read_uint(offset + mx, int_len)
+ self._lcs = self._read_uint(lcs_offset, 2)
+ self._lcp = self._read_uint(lcp_offset, 2)
+
+ def _process_columnsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ self.column_count = self._read_uint(offset, int_len)
+ if self.col_count_p1 + self.col_count_p2 != self.column_count:
+ print(
+ f"Warning: column count mismatch ({self.col_count_p1} + "
+ f"{self.col_count_p2} != {self.column_count})\n"
+ )
+
+ # Unknown purpose
+ def _process_subheader_counts(self, offset: int, length: int) -> None:
+ pass
+
+ def _process_columntext_subheader(self, offset: int, length: int) -> None:
+ offset += self._int_length
+ text_block_size = self._read_uint(offset, const.text_block_size_length)
+
+ buf = self._read_bytes(offset, text_block_size)
+ cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
+ self.column_names_raw.append(cname_raw)
+
+ if len(self.column_names_raw) == 1:
+ compression_literal = b""
+ for cl in const.compression_literals:
+ if cl in cname_raw:
+ compression_literal = cl
+ self.compression = compression_literal
+ offset -= self._int_length
+
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+
+ buf = self._read_bytes(offset1, self._lcp)
+ compression_literal = buf.rstrip(b"\x00")
+ if compression_literal == b"":
+ self._lcs = 0
+ offset1 = offset + 32
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif compression_literal == const.rle_compression:
+ offset1 = offset + 40
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif self._lcs > 0:
+ self._lcp = 0
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcs)
+ self.creator_proc = buf[0 : self._lcp]
+ if hasattr(self, "creator_proc"):
+ self.creator_proc = self._convert_header_text(self.creator_proc)
+
+ def _process_columnname_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ column_name_pointers_count = (length - 2 * int_len - 12) // 8
+ for i in range(column_name_pointers_count):
+ text_subheader = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_text_subheader_offset
+ )
+ col_name_offset = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_offset_offset
+ )
+ col_name_length = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_length_offset
+ )
+
+ idx = self._read_uint(
+ text_subheader, const.column_name_text_subheader_length
+ )
+ col_offset = self._read_uint(
+ col_name_offset, const.column_name_offset_length
+ )
+ col_len = self._read_uint(col_name_length, const.column_name_length_length)
+
+ name_raw = self.column_names_raw[idx]
+ cname = name_raw[col_offset : col_offset + col_len]
+ self.column_names.append(self._convert_header_text(cname))
+
+ def _process_columnattributes_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)
+ for i in range(column_attributes_vectors_count):
+ col_data_offset = (
+ offset + int_len + const.column_data_offset_offset + i * (int_len + 8)
+ )
+ col_data_len = (
+ offset
+ + 2 * int_len
+ + const.column_data_length_offset
+ + i * (int_len + 8)
+ )
+ col_types = (
+ offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
+ )
+
+ x = self._read_uint(col_data_offset, int_len)
+ self._column_data_offsets.append(x)
+
+ x = self._read_uint(col_data_len, const.column_data_length_length)
+ self._column_data_lengths.append(x)
+
+ x = self._read_uint(col_types, const.column_type_length)
+ self._column_types.append(b"d" if x == 1 else b"s")
+
+ def _process_columnlist_subheader(self, offset: int, length: int) -> None:
+ # unknown purpose
+ pass
+
+ def _process_format_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ text_subheader_format = (
+ offset + const.column_format_text_subheader_index_offset + 3 * int_len
+ )
+ col_format_offset = offset + const.column_format_offset_offset + 3 * int_len
+ col_format_len = offset + const.column_format_length_offset + 3 * int_len
+ text_subheader_label = (
+ offset + const.column_label_text_subheader_index_offset + 3 * int_len
+ )
+ col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
+ col_label_len = offset + const.column_label_length_offset + 3 * int_len
+
+ x = self._read_uint(
+ text_subheader_format, const.column_format_text_subheader_index_length
+ )
+ format_idx = min(x, len(self.column_names_raw) - 1)
+
+ format_start = self._read_uint(
+ col_format_offset, const.column_format_offset_length
+ )
+ format_len = self._read_uint(col_format_len, const.column_format_length_length)
+
+ label_idx = self._read_uint(
+ text_subheader_label, const.column_label_text_subheader_index_length
+ )
+ label_idx = min(label_idx, len(self.column_names_raw) - 1)
+
+ label_start = self._read_uint(
+ col_label_offset, const.column_label_offset_length
+ )
+ label_len = self._read_uint(col_label_len, const.column_label_length_length)
+
+ label_names = self.column_names_raw[label_idx]
+ column_label = self._convert_header_text(
+ label_names[label_start : label_start + label_len]
+ )
+ format_names = self.column_names_raw[format_idx]
+ column_format = self._convert_header_text(
+ format_names[format_start : format_start + format_len]
+ )
+ current_column_number = len(self.columns)
+
+ col = _Column(
+ current_column_number,
+ self.column_names[current_column_number],
+ column_label,
+ column_format,
+ self._column_types[current_column_number],
+ self._column_data_lengths[current_column_number],
+ )
+
+ self.column_formats.append(column_format)
+ self.columns.append(col)
+
+ def read(self, nrows: int | None = None) -> DataFrame:
+ if (nrows is None) and (self.chunksize is not None):
+ nrows = self.chunksize
+ elif nrows is None:
+ nrows = self.row_count
+
+ if len(self._column_types) == 0:
+ self.close()
+ raise EmptyDataError("No columns to parse from file")
+
+ if nrows > 0 and self._current_row_in_file_index >= self.row_count:
+ return DataFrame()
+
+ nrows = min(nrows, self.row_count - self._current_row_in_file_index)
+
+ nd = self._column_types.count(b"d")
+ ns = self._column_types.count(b"s")
+
+ self._string_chunk = np.empty((ns, nrows), dtype=object)
+ self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)
+
+ self._current_row_in_chunk_index = 0
+ p = Parser(self)
+ p.read(nrows)
+
+ rslt = self._chunk_to_dataframe()
+ if self.index is not None:
+ rslt = rslt.set_index(self.index)
+
+ return rslt
+
+ def _read_next_page(self):
+ self._current_page_data_subheader_pointers = []
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ return True
+ elif len(self._cached_page) != self._page_length:
+ self.close()
+ msg = (
+ "failed to read complete page from file (read "
+ f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
+ )
+ raise ValueError(msg)
+
+ self._read_page_header()
+ if self._current_page_type in const.page_meta_types:
+ self._process_page_metadata()
+
+ if self._current_page_type not in const.page_meta_types + [
+ const.page_data_type,
+ const.page_mix_type,
+ ]:
+ return self._read_next_page()
+
+ return False
+
+ def _chunk_to_dataframe(self) -> DataFrame:
+ n = self._current_row_in_chunk_index
+ m = self._current_row_in_file_index
+ ix = range(m - n, m)
+ rslt = {}
+
+ js, jb = 0, 0
+ for j in range(self.column_count):
+ name = self.column_names[j]
+
+ if self._column_types[j] == b"d":
+ col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")
+ rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False)
+ if self.convert_dates:
+ if self.column_formats[j] in const.sas_date_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "d")
+ elif self.column_formats[j] in const.sas_datetime_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "s")
+ jb += 1
+ elif self._column_types[j] == b"s":
+ rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False)
+ if self.convert_text and (self.encoding is not None):
+ rslt[name] = self._decode_string(rslt[name].str)
+ js += 1
+ else:
+ self.close()
+ raise ValueError(f"unknown column type {repr(self._column_types[j])}")
+
+ df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)
+ return df
+
+ def _decode_string(self, b):
+ return b.decode(self.encoding or self.default_encoding)
+
+ def _convert_header_text(self, b: bytes) -> str | bytes:
+ if self.convert_header_text:
+ return self._decode_string(b)
+ else:
+ return b
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..62c17bd03927e5f852af708e6b9ef6cf7e74d57c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py
@@ -0,0 +1,310 @@
+from __future__ import annotations
+
+from typing import Final
+
+magic: Final = (
+ b"\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\xc2\xea\x81\x60"
+ b"\xb3\x14\x11\xcf\xbd\x92\x08\x00"
+ b"\x09\xc7\x31\x8c\x18\x1f\x10\x11"
+)
+
+align_1_checker_value: Final = b"3"
+align_1_offset: Final = 32
+align_1_length: Final = 1
+align_1_value: Final = 4
+u64_byte_checker_value: Final = b"3"
+align_2_offset: Final = 35
+align_2_length: Final = 1
+align_2_value: Final = 4
+endianness_offset: Final = 37
+endianness_length: Final = 1
+platform_offset: Final = 39
+platform_length: Final = 1
+encoding_offset: Final = 70
+encoding_length: Final = 1
+dataset_offset: Final = 92
+dataset_length: Final = 64
+file_type_offset: Final = 156
+file_type_length: Final = 8
+date_created_offset: Final = 164
+date_created_length: Final = 8
+date_modified_offset: Final = 172
+date_modified_length: Final = 8
+header_size_offset: Final = 196
+header_size_length: Final = 4
+page_size_offset: Final = 200
+page_size_length: Final = 4
+page_count_offset: Final = 204
+page_count_length: Final = 4
+sas_release_offset: Final = 216
+sas_release_length: Final = 8
+sas_server_type_offset: Final = 224
+sas_server_type_length: Final = 16
+os_version_number_offset: Final = 240
+os_version_number_length: Final = 16
+os_maker_offset: Final = 256
+os_maker_length: Final = 16
+os_name_offset: Final = 272
+os_name_length: Final = 16
+page_bit_offset_x86: Final = 16
+page_bit_offset_x64: Final = 32
+subheader_pointer_length_x86: Final = 12
+subheader_pointer_length_x64: Final = 24
+page_type_offset: Final = 0
+page_type_length: Final = 2
+block_count_offset: Final = 2
+block_count_length: Final = 2
+subheader_count_offset: Final = 4
+subheader_count_length: Final = 2
+page_type_mask: Final = 0x0F00
+# Keep "page_comp_type" bits
+page_type_mask2: Final = 0xF000 | page_type_mask
+page_meta_type: Final = 0x0000
+page_data_type: Final = 0x0100
+page_mix_type: Final = 0x0200
+page_amd_type: Final = 0x0400
+page_meta2_type: Final = 0x4000
+page_comp_type: Final = 0x9000
+page_meta_types: Final = [page_meta_type, page_meta2_type]
+subheader_pointers_offset: Final = 8
+truncated_subheader_id: Final = 1
+compressed_subheader_id: Final = 4
+compressed_subheader_type: Final = 1
+text_block_size_length: Final = 2
+row_length_offset_multiplier: Final = 5
+row_count_offset_multiplier: Final = 6
+col_count_p1_multiplier: Final = 9
+col_count_p2_multiplier: Final = 10
+row_count_on_mix_page_offset_multiplier: Final = 15
+column_name_pointer_length: Final = 8
+column_name_text_subheader_offset: Final = 0
+column_name_text_subheader_length: Final = 2
+column_name_offset_offset: Final = 2
+column_name_offset_length: Final = 2
+column_name_length_offset: Final = 4
+column_name_length_length: Final = 2
+column_data_offset_offset: Final = 8
+column_data_length_offset: Final = 8
+column_data_length_length: Final = 4
+column_type_offset: Final = 14
+column_type_length: Final = 1
+column_format_text_subheader_index_offset: Final = 22
+column_format_text_subheader_index_length: Final = 2
+column_format_offset_offset: Final = 24
+column_format_offset_length: Final = 2
+column_format_length_offset: Final = 26
+column_format_length_length: Final = 2
+column_label_text_subheader_index_offset: Final = 28
+column_label_text_subheader_index_length: Final = 2
+column_label_offset_offset: Final = 30
+column_label_offset_length: Final = 2
+column_label_length_offset: Final = 32
+column_label_length_length: Final = 2
+rle_compression: Final = b"SASYZCRL"
+rdc_compression: Final = b"SASYZCR2"
+
+compression_literals: Final = [rle_compression, rdc_compression]
+
+# Incomplete list of encodings, using SAS nomenclature:
+# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html
+# corresponding to the Python documentation of standard encodings
+# https://docs.python.org/3/library/codecs.html#standard-encodings
+encoding_names: Final = {
+ 20: "utf-8",
+ 29: "latin1",
+ 30: "latin2",
+ 31: "latin3",
+ 32: "latin4",
+ 33: "cyrillic",
+ 34: "arabic",
+ 35: "greek",
+ 36: "hebrew",
+ 37: "latin5",
+ 38: "latin6",
+ 39: "cp874",
+ 40: "latin9",
+ 41: "cp437",
+ 42: "cp850",
+ 43: "cp852",
+ 44: "cp857",
+ 45: "cp858",
+ 46: "cp862",
+ 47: "cp864",
+ 48: "cp865",
+ 49: "cp866",
+ 50: "cp869",
+ 51: "cp874",
+ # 52: "", # not found
+ # 53: "", # not found
+ # 54: "", # not found
+ 55: "cp720",
+ 56: "cp737",
+ 57: "cp775",
+ 58: "cp860",
+ 59: "cp863",
+ 60: "cp1250",
+ 61: "cp1251",
+ 62: "cp1252",
+ 63: "cp1253",
+ 64: "cp1254",
+ 65: "cp1255",
+ 66: "cp1256",
+ 67: "cp1257",
+ 68: "cp1258",
+ 118: "cp950",
+ # 119: "", # not found
+ 123: "big5",
+ 125: "gb2312",
+ 126: "cp936",
+ 134: "euc_jp",
+ 136: "cp932",
+ 138: "shift_jis",
+ 140: "euc-kr",
+ 141: "cp949",
+ 227: "latin8",
+ # 228: "", # not found
+ # 229: "" # not found
+}
+
+
+class SASIndex:
+ row_size_index: Final = 0
+ column_size_index: Final = 1
+ subheader_counts_index: Final = 2
+ column_text_index: Final = 3
+ column_name_index: Final = 4
+ column_attributes_index: Final = 5
+ format_and_label_index: Final = 6
+ column_list_index: Final = 7
+ data_subheader_index: Final = 8
+
+
+subheader_signature_to_index: Final = {
+ b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
+ b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
+ b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,
+ b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,
+ b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
+ b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
+ b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,
+ b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,
+ b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,
+ b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
+ b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
+ b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
+ b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
+ b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,
+ b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
+ b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
+ b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,
+ b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
+ b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
+ b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,
+ b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
+ b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
+}
+
+
+# List of frequently used SAS date and datetime formats
+# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm
+# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java
+sas_date_formats: Final = (
+ "DATE",
+ "DAY",
+ "DDMMYY",
+ "DOWNAME",
+ "JULDAY",
+ "JULIAN",
+ "MMDDYY",
+ "MMYY",
+ "MMYYC",
+ "MMYYD",
+ "MMYYP",
+ "MMYYS",
+ "MMYYN",
+ "MONNAME",
+ "MONTH",
+ "MONYY",
+ "QTR",
+ "QTRR",
+ "NENGO",
+ "WEEKDATE",
+ "WEEKDATX",
+ "WEEKDAY",
+ "WEEKV",
+ "WORDDATE",
+ "WORDDATX",
+ "YEAR",
+ "YYMM",
+ "YYMMC",
+ "YYMMD",
+ "YYMMP",
+ "YYMMS",
+ "YYMMN",
+ "YYMON",
+ "YYMMDD",
+ "YYQ",
+ "YYQC",
+ "YYQD",
+ "YYQP",
+ "YYQS",
+ "YYQN",
+ "YYQR",
+ "YYQRC",
+ "YYQRD",
+ "YYQRP",
+ "YYQRS",
+ "YYQRN",
+ "YYMMDDP",
+ "YYMMDDC",
+ "E8601DA",
+ "YYMMDDN",
+ "MMDDYYC",
+ "MMDDYYS",
+ "MMDDYYD",
+ "YYMMDDS",
+ "B8601DA",
+ "DDMMYYN",
+ "YYMMDDD",
+ "DDMMYYB",
+ "DDMMYYP",
+ "MMDDYYP",
+ "YYMMDDB",
+ "MMDDYYN",
+ "DDMMYYC",
+ "DDMMYYD",
+ "DDMMYYS",
+ "MINGUO",
+)
+
+sas_datetime_formats: Final = (
+ "DATETIME",
+ "DTWKDATX",
+ "B8601DN",
+ "B8601DT",
+ "B8601DX",
+ "B8601DZ",
+ "B8601LX",
+ "E8601DN",
+ "E8601DT",
+ "E8601DX",
+ "E8601DZ",
+ "E8601LX",
+ "DATEAMPM",
+ "DTDATE",
+ "DTMONYY",
+ "DTMONYY",
+ "DTWKDATX",
+ "DTYEAR",
+ "TOD",
+ "MDYAMPM",
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
new file mode 100644
index 0000000000000000000000000000000000000000..11b2ed0ee73168ba82e3b8d312f96bcea9398e49
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
@@ -0,0 +1,508 @@
+"""
+Read a SAS XPort format file into a Pandas DataFrame.
+
+Based on code from Jack Cushman (github.com/jcushman/xport).
+
+The file format is defined here:
+
+https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import datetime
+import struct
+from typing import TYPE_CHECKING
+import warnings
+
+import numpy as np
+
+from pandas.util._decorators import Appender
+from pandas.util._exceptions import find_stack_level
+
+import pandas as pd
+
+from pandas.io.common import get_handle
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ DatetimeNaTType,
+ FilePath,
+ ReadBuffer,
+ )
+_correct_line1 = (
+ "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_header1 = (
+ "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
+)
+_correct_header2 = (
+ "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_obs_header = (
+ "HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_fieldkeys = [
+ "ntype",
+ "nhfun",
+ "field_length",
+ "nvar0",
+ "name",
+ "label",
+ "nform",
+ "nfl",
+ "num_decimals",
+ "nfj",
+ "nfill",
+ "niform",
+ "nifl",
+ "nifd",
+ "npos",
+ "_",
+]
+
+
+_base_params_doc = """\
+Parameters
+----------
+filepath_or_buffer : str or file-like object
+ Path to SAS file or object implementing binary read method."""
+
+_params2_doc = """\
+index : identifier of index column
+ Identifier of column that should be used as index of the DataFrame.
+encoding : str
+ Encoding for text data.
+chunksize : int
+ Read file `chunksize` lines at a time, returns iterator."""
+
+_format_params_doc = """\
+format : str
+ File format, only `xport` is currently supported."""
+
+_iterator_doc = """\
+iterator : bool, default False
+ Return XportReader object for reading file incrementally."""
+
+
+_read_sas_doc = f"""Read a SAS file into a DataFrame.
+
+{_base_params_doc}
+{_format_params_doc}
+{_params2_doc}
+{_iterator_doc}
+
+Returns
+-------
+DataFrame or XportReader
+
+Examples
+--------
+Read a SAS Xport file:
+
+>>> df = pd.read_sas('filename.XPT')
+
+Read an Xport file in 10,000-line chunks:
+
+>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
+>>> for chunk in itr:
+...     do_something(chunk)
+
+"""
+
+_xport_reader_doc = f"""\
+Class for reading SAS Xport files.
+
+{_base_params_doc}
+{_params2_doc}
+
+Attributes
+----------
+member_info : list
+ Contains information about the file.
+fields : list
+ Contains information about the variables in the file.
+"""
+
+_read_method_doc = """\
+Read observations from SAS Xport file, returning as data frame.
+
+Parameters
+----------
+nrows : int
+ Number of rows to read from data file; if None, read whole
+ file.
+
+Returns
+-------
+A DataFrame.
+"""
+
+
+def _parse_date(datestr: str) -> DatetimeNaTType:
+ """Given a date in xport format, return Python date."""
+ try:
+ # e.g. "16FEB11:10:07:55"
+ return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
+ except ValueError:
+ return pd.NaT
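+
+# Illustrative check (hedged): XPORT headers store timestamps such as
+# "16FEB11:10:07:55", which parse to a naive datetime; anything that does
+# not match the pattern falls back to pd.NaT.
+#
+# >>> _parse_date("16FEB11:10:07:55")
+# datetime.datetime(2011, 2, 16, 10, 7, 55)
+# >>> _parse_date("")
+# NaT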
+
+
+def _split_line(s: str, parts):
+ """
+ Parameters
+ ----------
+ s : str
+ Fixed-length string to split.
+ parts : list of (name, length) pairs
+ Used to break up the string; fields named '_' are filtered from the output.
+
+ Returns
+ -------
+ Dict mapping each field name to the contents of the string at that location.
+ """
+ out = {}
+ start = 0
+ for name, length in parts:
+ out[name] = s[start : start + length].strip()
+ start += length
+ del out["_"]
+ return out
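+
+# Illustrative sketch (field layout and values hypothetical): splitting a
+# fixed-width header card into named fields; the throwaway "_" field is
+# dropped from the result.
+#
+# >>> _split_line("SAS     MYDATA  ", [["prefix", 8], ["name", 6], ["_", 2]])
+# {'prefix': 'SAS', 'name': 'MYDATA'}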
+
+
+def _handle_truncated_float_vec(vec, nbytes):
+ # This feature is not well documented, but some SAS XPORT files
+ # have 2-7 byte "truncated" floats. To read these truncated
+ # floats, pad them with zeros on the right to make 8 byte floats.
+ #
+ # References:
+ # https://github.com/jcushman/xport/pull/3
+ # The R "foreign" library
+
+ if nbytes != 8:
+ vec1 = np.zeros(len(vec), np.dtype("S8"))
+ dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")
+ vec2 = vec1.view(dtype=dtype)
+ vec2["f0"] = vec
+ return vec2
+
+ return vec
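+
+# Illustrative round trip (hedged): 0x41100000 is the first four bytes of
+# the IBM representation of 1.0; right-padding with NUL bytes restores a
+# full 8-byte value that _parse_float_vec below can decode.
+#
+# >>> trunc = np.array([b"\x41\x10\x00\x00"], dtype="S4")
+# >>> padded = _handle_truncated_float_vec(trunc, 4)
+# >>> _parse_float_vec(padded)
+# array([1.])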
+
+
+def _parse_float_vec(vec):
+ """
+ Parse a vector of float values representing IBM 8 byte floats into
+ native 8 byte floats.
+ """
+ dtype = np.dtype(">u4,>u4")
+ vec1 = vec.view(dtype=dtype)
+ xport1 = vec1["f0"]
+ xport2 = vec1["f1"]
+
+ # Start by setting first half of ieee number to first half of IBM
+ # number sans exponent
+ ieee1 = xport1 & 0x00FFFFFF
+
+ # The fraction bit to the left of the binary point in the ieee
+ # format was set and the number was shifted 0, 1, 2, or 3
+ # places. This will tell us how to adjust the ibm exponent to be a
+ # power of 2 ieee exponent and how to shift the fraction bits to
+ # restore the correct magnitude.
+ shift = np.zeros(len(vec), dtype=np.uint8)
+ shift[np.where(xport1 & 0x00200000)] = 1
+ shift[np.where(xport1 & 0x00400000)] = 2
+ shift[np.where(xport1 & 0x00800000)] = 3
+
+ # shift the ieee number down the correct number of places then
+ # set the second half of the ieee number to be the second half
+ # of the ibm number shifted appropriately, ored with the bits
+ # from the first half that would have been shifted in if we
+ # could shift a double. All we are worried about are the low
+ # order 3 bits of the first half since we're only shifting by
+ # 1, 2, or 3.
+ ieee1 >>= shift
+ ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
+
+ # clear the 1 bit to the left of the binary point
+ ieee1 &= 0xFFEFFFFF
+
+ # set the exponent of the ieee number to be the actual exponent
+ # plus the shift count + 1023. Or this into the first half of the
+ # ieee number. The ibm exponent is excess 64 but is adjusted by 65
+ # since during conversion to ibm format the exponent is
+ # incremented by 1 and the fraction bits left 4 positions to the
+ # right of the radix point. (had to add >> 24 because C treats &
+ # 0x7f as 0x7f000000 and Python doesn't)
+ ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (
+ xport1 & 0x80000000
+ )
+
+ ieee = np.empty((len(ieee1),), dtype=">u4,>u4")
+ ieee["f0"] = ieee1
+ ieee["f1"] = ieee2
+ ieee = ieee.view(dtype=">f8")
+ ieee = ieee.astype("f8")
+
+ return ieee
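+
+# Worked example (hedged, values chosen for illustration): the IBM pattern
+# 0x41100000_00000000 encodes 1.0 (fraction 1/16, excess-64 exponent 65);
+# setting the sign bit gives -1.0.  Both survive the bit surgery above.
+#
+# >>> raw = np.array([b"\x41\x10\x00\x00\x00\x00\x00\x00",
+# ...                 b"\xc1\x10\x00\x00\x00\x00\x00\x00"], dtype="S8")
+# >>> _parse_float_vec(raw)
+# array([ 1., -1.])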
+
+
+class XportReader(ReaderBase, abc.Iterator):
+ __doc__ = _xport_reader_doc
+
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ index=None,
+ encoding: str | None = "ISO-8859-1",
+ chunksize: int | None = None,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self._encoding = encoding
+ self._lines_read = 0
+ self._index = index
+ self._chunksize = chunksize
+
+ self.handles = get_handle(
+ filepath_or_buffer,
+ "rb",
+ encoding=encoding,
+ is_text=False,
+ compression=compression,
+ )
+ self.filepath_or_buffer = self.handles.handle
+
+ try:
+ self._read_header()
+ except Exception:
+ self.close()
+ raise
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_row(self):
+ return self.filepath_or_buffer.read(80).decode()
+
+ def _read_header(self) -> None:
+ self.filepath_or_buffer.seek(0)
+
+ # read file header
+ line1 = self._get_row()
+ if line1 != _correct_line1:
+ if "**COMPRESSED**" in line1:
+ # this was created with the PROC CPORT method and can't be read
+ # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm
+ raise ValueError(
+ "Header record indicates a CPORT file, which is not readable."
+ )
+ raise ValueError("Header record is not an XPORT file.")
+
+ line2 = self._get_row()
+ fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
+ file_info = _split_line(line2, fif)
+ if file_info["prefix"] != "SAS SAS SASLIB":
+ raise ValueError("Header record has invalid prefix.")
+ file_info["created"] = _parse_date(file_info["created"])
+ self.file_info = file_info
+
+ line3 = self._get_row()
+ file_info["modified"] = _parse_date(line3[:16])
+
+ # read member header
+ header1 = self._get_row()
+ header2 = self._get_row()
+ headflag1 = header1.startswith(_correct_header1)
+ headflag2 = header2 == _correct_header2
+ if not (headflag1 and headflag2):
+ raise ValueError("Member header not found")
+ # usually 140, could be 135
+ fieldnamelength = int(header1[-5:-2])
+
+ # member info
+ mem = [
+ ["prefix", 8],
+ ["set_name", 8],
+ ["sasdata", 8],
+ ["version", 8],
+ ["OS", 8],
+ ["_", 24],
+ ["created", 16],
+ ]
+ member_info = _split_line(self._get_row(), mem)
+ mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
+ member_info.update(_split_line(self._get_row(), mem))
+ member_info["modified"] = _parse_date(member_info["modified"])
+ member_info["created"] = _parse_date(member_info["created"])
+ self.member_info = member_info
+
+ # read field names
+ types = {1: "numeric", 2: "char"}
+ fieldcount = int(self._get_row()[54:58])
+ datalength = fieldnamelength * fieldcount
+ # round up to the nearest multiple of 80 bytes (one header card),
+ # e.g. fieldcount=3, fieldnamelength=140 -> 420 -> padded to 480
+ if datalength % 80:
+ datalength += 80 - datalength % 80
+ fielddata = self.filepath_or_buffer.read(datalength)
+ fields = []
+ obs_length = 0
+ while len(fielddata) >= fieldnamelength:
+ # pull data for one field
+ fieldbytes, fielddata = (
+ fielddata[:fieldnamelength],
+ fielddata[fieldnamelength:],
+ )
+
+ # the trailing bytes are ignored by the struct pattern below, so if
+ # the field record is shorter than 140 bytes, pad it out with spaces
+ fieldbytes = fieldbytes.ljust(140)
+
+ fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes)
+ field = dict(zip(_fieldkeys, fieldstruct))
+ del field["_"]
+ field["ntype"] = types[field["ntype"]]
+ fl = field["field_length"]
+ if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
+ msg = f"Floating field width {fl} is not between 2 and 8."
+ raise TypeError(msg)
+
+ for k, v in field.items():
+ try:
+ field[k] = v.strip()
+ except AttributeError:
+ pass
+
+ obs_length += field["field_length"]
+ fields += [field]
+
+ header = self._get_row()
+ if header != _correct_obs_header:
+ raise ValueError("Observation header not found.")
+
+ self.fields = fields
+ self.record_length = obs_length
+ self.record_start = self.filepath_or_buffer.tell()
+
+ self.nobs = self._record_count()
+ self.columns = [x["name"].decode() for x in self.fields]
+
+ # Set up the dtype: one fixed-width bytes field per column,
+ # e.g. [("s0", "S8"), ("s1", "S10")].
+ dtypel = [
+ ("s" + str(i), "S" + str(field["field_length"]))
+ for i, field in enumerate(self.fields)
+ ]
+ dtype = np.dtype(dtypel)
+ self._dtype = dtype
+
+ def __next__(self) -> pd.DataFrame:
+ return self.read(nrows=self._chunksize or 1)
+
+ def _record_count(self) -> int:
+ """
+ Get number of records in file.
+
+ This may be suboptimal because we have to seek to the end of
+ the file.
+
+ Side effect: returns file position to record_start.
+ """
+ self.filepath_or_buffer.seek(0, 2)
+ total_records_length = self.filepath_or_buffer.tell() - self.record_start
+
+ if total_records_length % 80 != 0:
+ warnings.warn(
+ "xport file may be corrupted.",
+ stacklevel=find_stack_level(),
+ )
+
+ if self.record_length > 80:
+ self.filepath_or_buffer.seek(self.record_start)
+ return total_records_length // self.record_length
+
+ self.filepath_or_buffer.seek(-80, 2)
+ last_card_bytes = self.filepath_or_buffer.read(80)
+ last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)
+
+ # 2314885530818453536 is eight ASCII spaces (b" " * 8) read as a
+ # single uint64; such words mark trailing blank padding in the last card
+ ix = np.flatnonzero(last_card == 2314885530818453536)
+
+ if len(ix) == 0:
+ tail_pad = 0
+ else:
+ tail_pad = 8 * len(ix)
+
+ self.filepath_or_buffer.seek(self.record_start)
+
+ return (total_records_length - tail_pad) // self.record_length
+
+ def get_chunk(self, size: int | None = None) -> pd.DataFrame:
+ """
+ Read lines from the Xport file and return them as a DataFrame.
+
+ Parameters
+ ----------
+ size : int, defaults to None
+ Number of lines to read. If None, falls back to the reader's
+ ``chunksize``; if that is also None, the whole file is read.
+
+ Returns
+ -------
+ DataFrame
+ """
+ if size is None:
+ size = self._chunksize
+ return self.read(nrows=size)
+
+ def _missing_double(self, vec):
+ # A value is treated as missing when every byte after the first is
+ # zero and the first byte is '.' (0x2E), '_' (0x5F), or 'A'-'Z'
+ # (0x41-0x5A), i.e. the SAS standard and special missing values.
+ v = vec.view(dtype="u1,u1,u2,u4")
+ miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
+ miss1 = (
+ ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
+ | (v["f0"] == 0x5F)
+ | (v["f0"] == 0x2E)
+ )
+ miss &= miss1
+ return miss
+
+ @Appender(_read_method_doc)
+ def read(self, nrows: int | None = None) -> pd.DataFrame:
+ if nrows is None:
+ nrows = self.nobs
+
+ read_lines = min(nrows, self.nobs - self._lines_read)
+ read_len = read_lines * self.record_length
+ if read_len <= 0:
+ self.close()
+ raise StopIteration
+ raw = self.filepath_or_buffer.read(read_len)
+ data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
+
+ df_data = {}
+ for j, x in enumerate(self.columns):
+ vec = data["s" + str(j)]
+ ntype = self.fields[j]["ntype"]
+ if ntype == "numeric":
+ vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])
+ miss = self._missing_double(vec)
+ v = _parse_float_vec(vec)
+ v[miss] = np.nan
+ elif self.fields[j]["ntype"] == "char":
+ v = [y.rstrip() for y in vec]
+
+ if self._encoding is not None:
+ v = [y.decode(self._encoding) for y in v]
+
+ df_data.update({x: v})
+ df = pd.DataFrame(df_data)
+
+ if self._index is None:
+ df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines))
+ else:
+ df = df.set_index(self._index)
+
+ self._lines_read += read_lines
+
+ return df
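+
+# Hedged usage sketch (file name and `process` are hypothetical): reading an
+# XPORT file in chunks through the reader directly, which is what
+# pd.read_sas does under the hood when `chunksize` or `iterator` is passed.
+#
+# >>> with XportReader("data.xpt", chunksize=10000) as rdr:
+# ...     for chunk in rdr:
+# ...         process(chunk)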
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sasreader.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..c39313d5dc6548fcc014f7a886988a2b9d9001ed
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
@@ -0,0 +1,178 @@
+"""
+Read SAS sas7bdat or xport files.
+"""
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+from typing import (
+ TYPE_CHECKING,
+ overload,
+)
+
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import stringify_path
+
+if TYPE_CHECKING:
+ from collections.abc import Hashable
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ Self,
+ )
+
+ from pandas import DataFrame
+
+
+class ReaderBase(ABC):
+ """
+ Protocol for XportReader and SAS7BDATReader classes.
+ """
+
+ @abstractmethod
+ def read(self, nrows: int | None = None) -> DataFrame:
+ ...
+
+ @abstractmethod
+ def close(self) -> None:
+ ...
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: int = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> ReaderBase:
+ ...
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: None = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> DataFrame | ReaderBase:
+ ...
+
+
+@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = None,
+ index: Hashable | None = None,
+ encoding: str | None = None,
+ chunksize: int | None = None,
+ iterator: bool = False,
+ compression: CompressionOptions = "infer",
+) -> DataFrame | ReaderBase:
+ """
+ Read SAS files stored as either XPORT or SAS7BDAT format files.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.sas7bdat``.
+ format : str {{'xport', 'sas7bdat'}} or None
+ If None, file format is inferred from file extension. If 'xport' or
+ 'sas7bdat', uses the corresponding format.
+ index : identifier of index column, defaults to None
+ Identifier of column that should be used as index of the DataFrame.
+ encoding : str, default is None
+ Encoding for text data. If None, text data are stored as raw bytes.
+ chunksize : int
+ Read file `chunksize` lines at a time, returns iterator.
+ iterator : bool, defaults to False
+ If True, returns an iterator for reading the file incrementally.
+ {decompression_options}
+
+ Returns
+ -------
+ DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
+ or XportReader
+
+ Examples
+ --------
+ >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
+ """
+ if format is None:
+ buffer_error_msg = (
+ "If this is a buffer object rather "
+ "than a string name, you must specify a format string"
+ )
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ raise ValueError(buffer_error_msg)
+ fname = filepath_or_buffer.lower()
+ if ".xpt" in fname:
+ format = "xport"
+ elif ".sas7bdat" in fname:
+ format = "sas7bdat"
+ else:
+ raise ValueError(
+ f"unable to infer format of SAS file from filename: {repr(fname)}"
+ )
+
+ reader: ReaderBase
+ if format.lower() == "xport":
+ from pandas.io.sas.sas_xport import XportReader
+
+ reader = XportReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ elif format.lower() == "sas7bdat":
+ from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+ reader = SAS7BDATReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ else:
+ raise ValueError("unknown SAS format")
+
+ if iterator or chunksize:
+ return reader
+
+ with reader:
+ return reader.read()
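+
+# Hedged usage sketch (paths hypothetical): the format is inferred from the
+# file extension unless given explicitly, and passing `chunksize` or
+# `iterator=True` returns the reader object instead of a DataFrame.
+#
+# >>> df = read_sas("airline.sas7bdat")
+# >>> with read_sas("airline.xpt", format="xport", chunksize=100_000) as rdr:
+# ...     for chunk in rdr:
+# ...         chunk.shape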
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/io/spss.py b/llmeval-env/lib/python3.10/site-packages/pandas/io/spss.py
new file mode 100644
index 0000000000000000000000000000000000000000..db31a07df79e6de2862e57fd75de0bd4b9c2455d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/io/spss.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.inference import is_list_like
+
+from pandas.io.common import stringify_path
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+ from pathlib import Path
+
+ from pandas._typing import DtypeBackend
+
+ from pandas import DataFrame
+
+
+def read_spss(
+ path: str | Path,
+ usecols: Sequence[str] | None = None,
+ convert_categoricals: bool = True,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame:
+ """
+ Load an SPSS file from the file path, returning a DataFrame.
+
+ Parameters
+ ----------
+ path : str or Path
+ File path.
+ usecols : list-like, optional
+ Return a subset of the columns. If None, return all columns.
+ convert_categoricals : bool, default is True
+ Convert categorical columns into pd.Categorical.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ DataFrame
+
+ Examples
+ --------
+ >>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
+ """
+ pyreadstat = import_optional_dependency("pyreadstat")
+ check_dtype_backend(dtype_backend)
+
+ if usecols is not None:
+ if not is_list_like(usecols):
+ raise TypeError("usecols must be list-like.")
+ usecols = list(usecols) # pyreadstat requires a list
+
+ df, metadata = pyreadstat.read_sav(
+ stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals
+ )
+ df.attrs = metadata.__dict__
+ if dtype_backend is not lib.no_default:
+ df = df.convert_dtypes(dtype_backend=dtype_backend)
+ return df
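+
+# Hedged usage sketch (file and column names hypothetical): selecting a
+# subset of columns and asking for a nullable-dtype result; as in the code
+# above, the pyreadstat metadata ends up in ``df.attrs``.
+#
+# >>> df = read_spss("survey.sav", usecols=["age", "income"],
+# ...                dtype_backend="numpy_nullable")
+# >>> list(df.attrs)  # names of pyreadstat metadata fields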